最终成果:
我的代码:
from bs4 import BeautifulSoup
import requests
# Target site plus the listing pages to crawl.
url = 'http://bj.xiaozhu.com/'
# Search pages p1 through p13 with the homepage prepended — 14 URLs in
# total (the homepage presumably shows the same results as page 1).
urls = [url] + ['http://bj.xiaozhu.com/search-duanzufang-p{}-0/'.format(n)
                for n in range(1, 14)]
info = []  # accumulates one dict of listing details per scraped page
def print_gender(class_name):
    """Map the host avatar's CSS class name to a gender label.

    'member_ico1' -> '女' (female), 'member_ico' -> '男' (male);
    any other class name yields None, matching the original if-chain.
    """
    return {'member_ico1': '女', 'member_ico': '男'}.get(class_name)
def get_attractions(url):
    """Scrape a single rental-listing page and record its details.

    Fetches *url*, parses the listing title, address, nightly price,
    first house photo, host avatar, host name and host gender, appends
    the resulting dict to the module-level ``info`` list and prints it.

    NOTE(review): each ``select(...)[0]`` raises IndexError if the page
    layout changes or the request is served an anti-bot page.
    """
    # A timeout prevents the whole crawl from hanging on a dead host;
    # the original call could block indefinitely.
    wb_data = requests.get(url, timeout=10)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    title = soup.select('div.pho_info > h4')[0].text.strip()
    addr = soup.select('div.pho_info > p')[0].get('title')
    price = soup.select('div.day_l > span')[0].text
    house_pic1 = soup.select('div.pho_show_big > div > img')[0].get('src')
    host_pic = soup.select('div.member_pic > a > img')[0].get('src')
    host_name = soup.select('div.w_240 > h6 > a')[0].text
    # The avatar div's first CSS class encodes the host's gender.
    host_gender = soup.select('div.member_pic > div')[0].get('class')[0]
    data = {
        'title': title,
        'addr': addr,
        'price': price,
        'house_pic1': house_pic1,
        'host_pic': host_pic,
        'host_name': host_name,
        'host_gender': print_gender(host_gender),
    }
    info.append(data)
    print(data)
links = []  # detail-page URLs harvested from the listing pages


def get_link(urls):
    """Collect the href of every listing card on each page in *urls*.

    Appends the harvested URLs to the module-level ``links`` list.
    """
    for url in urls:
        # Timeout so one unresponsive page cannot stall the crawl
        # (the original call had none and could block forever).
        wb_data = requests.get(url, timeout=10)
        soup = BeautifulSoup(wb_data.text, 'lxml')
        # Iterate the anchors directly instead of indexing via
        # range(len(...)) — same order, same appended hrefs.
        for anchor in soup.select('ul > li > a.resule_img_a'):
            links.append(anchor.get('href'))
# Crawl: first gather every detail-page link from the listing pages,
# then scrape each detail page in turn.
get_link(urls)
for detail_url in links:
    get_attractions(detail_url)
总结:
1. 获取标签信息的路径的方法,查看单个标签的唯一性路径
2. 利用字符串的 strip 函数,去除字符串两边的空格、回车和 Tab 键
3. 利用函数使代码清晰化