The purpose of this program: every morning it scrapes the important Jin10 financial calendar data and sends it to my phone, so I can read it as soon as I get up.
Why watch financial data? Heh.
Environment preparation (the third-party Python packages are installed as shown right after this list):
1. Python 3.8
2. A Linux server that stays on 24 hours a day
3. A QQ mailbox with third-party authorization enabled
4. WeChat...
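requests and beautifulsoup4 are third-party packages used by the script below, so they need to be installed first. One common way (assuming pip is available for your Python 3):

pip3 install requests beautifulsoup4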
The program code is as follows:
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by: Niuyoucai
# 2022.04.09 Ver 1.0
# Scraper for https://rili.jin10.com
import requests, re, sys
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
import smtplib
import email.message
class Spider(object):
    '''
    Dedicated scraper class for the jin10 site.
    Created by: Niuyoucai
    2022.04.09 Ver 1.0
    '''
    def __init__(self):
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.141 Safari/537.36'
        }
        self.root = 'https://rili.jin10.com/day/'
        self.session = requests.session()
        self.html = ''
        self.mailtext = ''
    def getHtmlContent(self, dt):
        '''Fetch the page text for date dt and run the three parsers.
        '''
        self.dt = dt
        url = self.root + dt
        try:
            response = self.session.get(url, headers=self.headers)
            response.encoding = 'utf-8'
            self.html = response.text
            self.getDatas()
            self.getEvents()
            self.getHolidays()
        except requests.exceptions.RequestException:
            self.html = ''
    def getDatas(self):
        '''Extract the important economic data.
        '''
        # The page lists its three sections in order, so slice the text
        # between the "经济数据" (economic data) and "事件" (events) markers.
        stpos = self.html.find('经济数据')
        topos = self.html.find('事件')
        html = self.html[stpos:topos]
        soup = BeautifulSoup(html, 'html.parser')
        trs = soup.find_all('tr', class_='jin-table-row')
        info = '[' + self.dt + ']' + '<br>' + '<b>重要经济数据</b>' + '<br>'
        for tr in trs:
            # Keep only rows flagged as important.
            if 'is-important' not in str(tr):
                continue
            tds = tr.find_all('td')
            for col in tds:
                # Strip the remaining tags and keep the cell text.
                data = re.sub('<.*?>', '', str(col)).strip()
                info += data
                info += ' '
            info += '<br>'
        self.mailtext += info
    def getEvents(self):
        '''Extract the important events.
        '''
        stpos = self.html.find('事件')
        topos = self.html.find('假期')
        html = self.html[stpos:topos]
        soup = BeautifulSoup(html, 'html.parser')
        trs = soup.find_all('tr', class_='jin-table-row')
        info = '<b>重要事件</b>' + '<br>'
        for tr in trs:
            if 'is-important' not in str(tr):
                continue
            tds = tr.find_all('td')
            for col in tds:
                data = re.sub('<.*?>', '', str(col)).strip()
                info += data
                info += ' '
            info += '<br>'
        self.mailtext += info
    def getHolidays(self):
        '''Extract the holidays.
        '''
        stpos = self.html.find('假期')
        html = self.html[stpos:]
        soup = BeautifulSoup(html, 'html.parser')
        trs = soup.find_all('tr', class_='jin-table-row current-countdown')
        info = '<b>假期</b>' + '<br>'
        for tr in trs:
            tds = tr.find_all('td')
            for col in tds:
                data = re.sub('<.*?>', '', str(col)).strip()
                info += data
                info += ' '
            info += '<br>'
        self.mailtext += info
    def mailqq(self):
        # Do not send a mail when nothing was scraped.
        if self.mailtext == '':
            return
        smtpObj = smtplib.SMTP_SSL('smtp.qq.com', 465)
        msg = email.message.EmailMessage()
        msg["From"] = "3285219560@qq.com"
        msg["To"] = "3285219560@qq.com"
        msg["Subject"] = "每日交易提醒"
        text = "<html><body>" + self.mailtext + "</body></html>"
        try:
            smtpObj.login('3285219560@qq.com', 'QQ mail third-party authorization code')
            # Send the scraped content as an HTML body.
            msg.set_content(text, subtype='html')
            smtpObj.send_message(msg)
        except smtplib.SMTPException as e:
            print(repr(e))
        finally:
            smtpObj.quit()
def main():
    # Argument check.
    if len(sys.argv) > 2:
        print('Usage:', sys.argv[0], '[data date YYYY-MM-DD]')
        return
    # With no argument, scrape today's and tomorrow's data.
    elif len(sys.argv) == 1:
        myspider = Spider()
        dt = datetime.strftime(datetime.now(), '%Y-%m-%d')
        myspider.getHtmlContent(dt)
        dt = datetime.strftime(datetime.now() + timedelta(days=1), '%Y-%m-%d')
        myspider.getHtmlContent(dt)
        myspider.mailqq()
    # With a date argument, scrape just that single day.
    elif len(sys.argv) == 2:
        myspider = Spider()
        dt = sys.argv[1]
        myspider.getHtmlContent(dt)
        myspider.mailqq()

if __name__ == '__main__':
    main()
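Before handing the script to cron, it can be tested by hand. A minimal sketch, assuming the code above is saved as jin10.py (the name used in the crontab entry below) and that you run it from the same directory:

# Manual test: scrape one (hypothetical) example date and print the result
# without sending any mail.
from jin10 import Spider

spider = Spider()
spider.getHtmlContent('2022-04-11')   # hypothetical example date
print(spider.mailtext)                # the text that would be emailed

Running the script directly also works: with no argument it fetches today and tomorrow and mails the result, and with a single YYYY-MM-DD argument it fetches just that day.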
Program notes
The Jin10 financial calendar has three sections: economic data, events, and holidays.
The program scrapes them with three separate functions, but keeps only the important data and events.
The scraped content is formatted into an HTML email, which is sent to yourself through QQ mail.
Third-party authorization has to be enabled for the QQ mailbox; this is done in the QQ Mail web interface, and the steps are not repeated here.
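The authorization code (not the normal QQ login password) is what goes into the smtpObj.login() call in the code above. A minimal sketch for checking that the code works, using the same host and port as the program and a placeholder in place of the real code:

# Quick sanity check of the QQ mail authorization code (placeholder values).
import smtplib

with smtplib.SMTP_SSL('smtp.qq.com', 465) as smtp:
    smtp.login('3285219560@qq.com', 'your-authorization-code')  # placeholder
    print('SMTP login OK')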
Bind the QQ mailbox to WeChat and turn on QQ mail notifications there; then whenever an email arrives, WeChat gives a "ding" notification and you can open it right away to read the report.
Automatic execution
On the Linux server, set up a crontab entry:
58 6 * * 1-5 /home/niu/bin/jin10.py
It runs at 6:58 on Monday through Friday mornings, so it can double as an alarm clock.
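Two practical suggestions (not part of the original setup): make sure the script is executable so cron can run it directly (chmod +x /home/niu/bin/jin10.py), and while debugging it helps to capture the script's output, for example with a hypothetical log path:

58 6 * * 1-5 /home/niu/bin/jin10.py >> /tmp/jin10.log 2>&1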