GitHub source code download
- First, a note: the Douban API limits request frequency, to roughly 40 requests per minute, so the crawler has to pause between requests.
- Once you exceed the limit, Douban's anti-crawling measures kick in and further requests from the same IP raise exceptions, so you need to simulate a logged-in session.
- The simplest way to simulate login is to sign in through the browser, copy the resulting cookies, and send them with every request to keep the session authenticated:
import re
import time
import requests

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
# Cookies copied from the browser after logging in; they are tied to one session and expire
cookies = {'cookie': 'bid=lYQOsRcej_8; __guid=236236167.1056829666077886000.1525765977089.4163; __yadk_uid=oTZbiJ2I8VYoUXCoZzHcWoBroPcym2QB; gr_user_id=24156fa6-1963-48f2-8b87-6a021d165bde; viewed="26708119_24294210_24375031"; ps=y; __utmt=1; _vwo_uuid_v2=DE96132378BF4399896F28BD0E9CFC4FD|8f3c433c345d866ad9849be60f1fb2a0; ue="2287093698@qq.com"; _pk_ref.100001.8cb4=%5B%22%22%2C%22%22%2C1527272795%2C%22https%3A%2F%2Faccounts.douban.com%2Fsafety%2Funlock_sms%2Fresetpassword%3Fconfirmation%3Dbf9474e931a2fa9a%26alias%3D%22%5D; _ga=GA1.2.262335411.1525765981; _gid=GA1.2.856273377.1527272802; dbcl2="62325063:TQjdVXw2PtM"; ck=csZ5; monitor_count=11; _pk_id.100001.8cb4=7b30f754efe8428f.1525765980.15.1527272803.1527269181.; _pk_ses.100001.8cb4=*; push_noty_num=0; push_doumail_num=0; __utma=30149280.262335411.1525765981.1527266658.1527269182.62; __utmb=30149280.9.10.1527269182; __utmc=30149280; __utmz=30149280.1527266658.61.22.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/login; __utmv=30149280.6232; ap=1; ll="108289"'}
# Request the first list page; with the cookies attached we get the logged-in version
res = requests.get('https://movie.douban.com/top250?start=0', headers=headers, cookies=cookies)
# res.text holds the raw HTML of the page
html = res.text
print(html)
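The snippets below also assume a shared MySQL connection (conn) and cursor (cursor), which this post never shows. A minimal setup sketch, assuming pymysql and a local database named douban; the credentials are placeholders to replace with your own:

import pymysql

# Placeholder connection settings: adjust host, user, password, and db to your setup
conn = pymysql.connect(host='localhost', user='root', password='your_password',
                       db='douban', charset='utf8mb4')
cursor = conn.cursor()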
def getMovieList(start):
    # Fetch one page of the Top 250 list (25 movies per page)
    res = requests.get('https://movie.douban.com/top250?start=' + start, headers=headers, cookies=cookies)
    html = res.text
    # print(html)
    # Groups: link, title, director line, year, country, genre, score, rating count, one-line quote
    reg = r'<div class="item">.*?<a href="(.*?)">.*?<span class="title">(.*?)</span>.*?<p class="">(.*?) .*?<br>.*?(.*?) / (.*?) / (.*?)</p>'
    reg += r'.*?<div class="star">.*?property="v:average">(.*?)</span>.*?<span>(.*?)</span>'
    reg += r'.*?<span class="inq">(.*?)</span>'
    return re.findall(reg, html, re.S)
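As a quick sanity check (illustrative, not part of the original script), each tuple returned by getMovieList unpacks into nine fields:

# Print title, score, and link for the first page of the list
for link, title, director, age, country, genre, score, evaluationNum, note in getMovieList('0'):
    print(title, score, link)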
- The list page gives us each movie's URL; we then open the detail page and scrape the rest of its information.
def getMovieContent(url, movieId):
    res = requests.get(url, headers=headers, cookies=cookies)
    html = res.text
    # print(html)
    # Grab the cast block of the movie
    reg = r'<span class="actor">(.*?)<br/>'
    actors = re.findall(reg, html)
    # If the page has an awards block, capture it; otherwise match nothing
    if '<div class="hd">' in html:
        prize = '<div class="hd">(.*?)'
    else:
        prize = ''
    # If the movie has alternative titles, capture them; otherwise match nothing
    if "又名:" in html:
        alias = '</span> (.*?)<br/>'
    else:
        alias = ''
    # Scrape the poster URL, release date, runtime, aliases, summary, comments link, and so on
    reg = r'<a class="nbgnbg".*?<img src="(.*?)".*?'
    reg += r'.*?<span property="v:initialReleaseDate" content="(.*?)".*?<span property="v:runtime" content="(.*?)">.*?' + alias
    reg += r'.*?<span property="v:summary".*?>(.*?)</span>.*?' + prize + '<div id="recommendations" class="">'
    reg += r'.*?<a class="comment_btn j a_collect_btn".*?<a href="(.*?)">'
    if prize != '':
        resultList = re.findall(reg, html, re.S)
        # Bail out if the page layout does not match the pattern
        if len(resultList) == 0:
            return
        if alias != '':
            poster, releaseDate, movieLength, otherName, summary, award, commentLink = resultList[0]
        else:
            poster, releaseDate, movieLength, summary, award, commentLink = resultList[0]
            otherName = ""
        # Each award entry: award name, then the category it was won in
        reg = r'<li>.*?<a href=".*?">(.*?)</a>.*?<li>(.*?)</li>'
        for awardName, awardType in re.findall(reg, award, re.S):
            cursor.execute("insert into award(movieId, name, type) values('{}', '{}', '{}')".format(
                movieId, awardName.replace("'", r"\'"), awardType.replace("'", r"\'")))
    else:
        resultList = re.findall(reg, html, re.S)
        if len(resultList) != 0:
            if alias != '':
                poster, releaseDate, movieLength, otherName, summary, commentLink = resultList[0]
            else:
                poster, releaseDate, movieLength, summary, commentLink = resultList[0]
                otherName = ""
        else:
            return
    # print(poster, actors, releaseDate, movieLength, otherName, summary, commentLink)
    if len(otherName) != 0:
        updateSql = "update movie set poster='{}', time='{}', movieLength='{}', otherName='{}', summary='{}', commentLink='{}' where id = '{}'".format(
            poster, releaseDate.strip(), movieLength, otherName.replace("'", r"\'"), summary.strip().replace("'", r"\'"),
            commentLink.replace("'", r"\'"), movieId)
    else:
        updateSql = "update movie set poster='{}', time='{}', movieLength='{}', summary='{}', commentLink='{}' where id = '{}'".format(
            poster, releaseDate.strip(), movieLength,
            summary.strip().replace("'", r"\'"), commentLink.replace("'", r"\'"), movieId)
    # Write the extra details back onto the movie row
    cursor.execute(updateSql)
    # Store every actor in the movie along with the URL of their profile page
    # (actors is a list of raw HTML fragments, so we search its string form)
    reg = r'<a href="(.*?)" rel="v:starring">(.*?)</a>'
    for link, name in re.findall(reg, str(actors)):
        cursor.execute("insert into actor(movieId, link, name) values('{}', '{}', '{}')".format(
            movieId, link.replace("'", r"\'"), name))
    # Store the movie's short reviews: reviewer name, review date, and review text
    for userName, commentTime, commentContent in getComment(commentLink):
        cursor.execute("insert into comment(movieId, userName, content, time) values('{}', '{}', '{}', '{}')".format(
            movieId, userName.replace("'", r"\'"), commentContent.replace("'", r"\'"), commentTime.strip()))
    conn.commit()
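A caveat on the SQL above: building statements with str.format and hand-escaping single quotes is fragile, since a stray backslash or quote in a review can still break the query or open it to injection. With a pymysql connection, parameter binding does the escaping instead; as a sketch, the comment insert above could be written as:

# Same insert as in getMovieContent, but letting the driver escape the values
cursor.execute(
    "insert into comment(movieId, userName, content, time) values(%s, %s, %s, %s)",
    (movieId, userName, commentContent, commentTime.strip()))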
def getComment(url):
    res = requests.get(url, headers=headers, cookies=cookies)
    html = res.text
    # Three groups per review: reviewer name, review date, review text
    reg = r'<span class="comment-info">.*?class="">(.*?)</a>.*?<span class="comment-time.*?>(.*?)</span>'
    reg += r'.*?<p class="">(.*?)</p>'
    return re.findall(reg, html, re.S)
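getComment can also be called on its own; it returns (reviewer, date, text) tuples for the single comments page behind the given URL. A usage sketch, where the subject URL is illustrative rather than one scraped above:

# Illustrative comments URL; in the crawler this comes from commentLink
for userName, commentTime, commentContent in getComment('https://movie.douban.com/subject/1292052/comments'):
    print(userName, commentTime.strip(), commentContent[:40])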
def startUpMovie():
    count = 0
    # 250 movies in total, 25 per page, so ten pages to crawl
    for i in range(0, 10):
        for link, title, director, age, country, genre, score, evaluationNum, note in getMovieList(str(25 * i)):
            print('Saving----{}'.format(title))
            # print(link, title, director, age, country, genre, score, evaluationNum, note)
            # evaluationNum looks like '123456人评价'; drop the three-character suffix
            cursor.execute("insert into movie(link, title, director, age, country, type, score, evaluationNum, note)"
                           " values('{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}')".format(
                link, title, director[6:].strip("导演: ").strip().replace("'", r"\'"), age.strip(),
                country, genre.strip().replace("'", r"\'"), score, evaluationNum[0:-3], note))
            # lastrowid is the id of the movie row we just inserted
            getMovieContent(link, cursor.lastrowid)
            conn.commit()
            count += 1
            # Sleep 7 seconds after every two movies so the IP does not get banned
            if count % 2 == 0:
                time.sleep(7)
            print("num:'{}'".format(count))

# Kick off the crawler
startUpMovie()
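For reference, a schema sketch matching the columns the insert and update statements use. Only the table and column names come from the code above; the types and auto-increment ids are assumptions, and this should run once before startUpMovie():

# Column types below are assumptions; only the names are taken from the SQL in this post
schema = [
    """create table if not exists movie (
           id int primary key auto_increment, link varchar(255), title varchar(255),
           director varchar(255), age varchar(16), country varchar(255), type varchar(255),
           score varchar(16), evaluationNum varchar(32), note varchar(255),
           poster varchar(255), time varchar(64), movieLength varchar(16),
           otherName text, summary text, commentLink varchar(255))""",
    """create table if not exists actor (id int primary key auto_increment,
           movieId int, link varchar(255), name varchar(255))""",
    """create table if not exists award (id int primary key auto_increment,
           movieId int, name varchar(255), type varchar(255))""",
    """create table if not exists comment (id int primary key auto_increment,
           movieId int, userName varchar(255), content text, time varchar(64))""",
]
for stmt in schema:
    cursor.execute(stmt)
conn.commit()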