import requests
from lxml import etree
import xlwt
# Browser-like User-Agent so the site serves the normal HTML instead of
# blocking an obvious script client.
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36',
}

# Accumulates one [id, content, laugh_num, thumbs_up] row per scraped item.
all_info_list = []
def get_info(url):
    """Scrape one qiushibaike listing page and append rows to all_info_list.

    Each appended row is [joke_id, content, laugh_num, thumbs_up], all
    strings extracted from the page. Items missing any field raise
    IndexError on the empty xpath result and are skipped.

    :param url: listing-page URL, e.g. https://www.qiushibaike.com/text/page/1/
    """
    # timeout keeps the whole scrape from hanging forever on one stalled page
    res = requests.get(url, headers=header, timeout=10)
    html = etree.HTML(res.text)
    infos = html.xpath('//div[@class="col1"]/div')
    for info in infos:
        try:
            # renamed from 'id', which shadowed the builtin
            joke_id = info.xpath('div[1]/a[2]/h2/text()')[0]
            content_node = info.xpath('a[1]/div/span[1]')[0]
            # string(.) flattens nested markup inside the content span
            content = content_node.xpath('string(.)').strip()
            # BUG FIX: the original stored the whole xpath result list here;
            # take the first text node so the cell value is a string —
            # xlwt's sheet.write raises "Unexpected data type" on a list.
            laugh_num = info.xpath('div[2]/span[1]/i/text()')[0]
            thumbs_up = info.xpath('a[2]/div/div/div/text()')[0]
            all_info_list.append([joke_id, content, laugh_num, thumbs_up])
        except IndexError:
            # item is missing one of the fields — skip it (original behavior)
            pass
if __name__ == '__main__':
    # Build the workbook and write the header row.
    book = xlwt.Workbook(encoding='utf-8')
    sheet = book.add_sheet('Sheet1')
    columns = ['id', 'content', 'laugh_num', 'thumbs_up']
    for col, title in enumerate(columns):
        sheet.write(0, col, title)

    # Scrape listing pages 1-13 of the text section; get_info appends
    # its rows to the module-level all_info_list.
    # (dropped redundant str(i) — f-strings format ints directly)
    urls = [f'https://www.qiushibaike.com/text/page/{i}/' for i in range(1, 14)]
    for url in urls:
        get_info(url)

    # One spreadsheet row per scraped record; row 0 holds the header.
    # 'record' replaces the original loop name 'list', which shadowed the builtin.
    for row, record in enumerate(all_info_list, start=1):
        for col, value in enumerate(record):
            sheet.write(row, col, value)
    book.save('C:/Users/Harron/Desktop/test1.xls')