Results
My code
from bs4 import BeautifulSoup
import requests
import time

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36',
    'Cookie': 'f=n; ipcity=sz%7C%u6DF1%u5733; myfeet_tooltip=end; f=n; id58=c5/nn1eG5BQ23S7cISNUAg==; bj58_id58s="aGJ0WERrMlAxWUFDMTgxMQ=="; sessionid=c780a61d-ec3b-4f40-bc4b-55a846bae0bd; f=n; als=0; bj58_new_session=0; bj58_init_refer=""; bj58_new_uv=1; 58tj_uuid=ebf8e987-94c8-4bbd-8f6a-e5920002849b; new_session=0; new_uv=1; utm_source=; spm=; init_refer='
}

# Collect detail-page links from the listing pages
def get_urls(start=1, end=1):
    urls_list = ['http://bj.58.com/pbdn/0/pn{}/'.format(i) for i in range(start, end + 1)]  # listing-page URLs
    url_detail = []  # holds the detail-page links
    for single_list in urls_list:
        wb_data = requests.get(single_list, headers=headers)
        soup = BeautifulSoup(wb_data.text, 'lxml')
        urls = soup.select('tr > td.img > a')
        for url in urls:
            # links that go through jump.zhineng.58.com are promoted items, so skip them
            if 'jump.zhineng.58.com' not in url.get('href'):
                url_detail.append(url.get('href'))  # keep the ordinary detail-page link
    return url_detail

# Scrape the detail page of each item
def get_info(urls):
    for each_url in urls:
        wb_data = requests.get(each_url, headers=headers)
        # time.sleep(2)  # throttle to one request every two seconds if the site starts blocking the crawler
        soup = BeautifulSoup(wb_data.text, 'lxml')
        cates = []  # holds the item's tags
        title = soup.select('h1')[0].get_text()
        # the extracted text starts with a blank line, so pass strip=True to get_text() to drop it
        classify = soup.find_all('span', 'crb_i')[-1].get_text(strip=True)
        price = soup.select('span.price_now > i')[0].get_text()
        area = soup.select('div.palce_li > span > i')[0].get_text()  # 'palce_li' is how the class is spelled in the page source
        review = soup.select('div.box_left_top > p > span.look_time')[0].get_text()
        want_p = soup.select('div.box_left_top > p > span.want_person')[0].get_text()
        cate = soup.select('div.biaoqian_li')
        for item in cate:
            cates.append(list(item.stripped_strings))
        data = {
            'title': title,
            'classify': classify,
            'price': price,
            'area': area,
            'review': review,
            'want_p': want_p,
            'cates': cates
        }
        print(data)

urls = get_urls(1, 10)
get_info(urls)
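
The script above only prints each record. To keep the results for later analysis, one option is to have get_info collect the data dicts into a list and return it, then write them to a CSV file. The sketch below is not part of the original code: the save_rows helper and the filename zhuanzhuan.csv are made up for illustration, and it assumes that modified, returning version of get_info.

import csv

FIELDS = ['title', 'classify', 'price', 'area', 'review', 'want_p', 'cates']

def save_rows(rows, path='zhuanzhuan.csv'):
    # rows: a list of the `data` dicts built in get_info
    with open(path, 'w', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=FIELDS)
        writer.writeheader()
        for row in rows:
            row = dict(row)
            # flatten the nested tag lists into a single string cell
            row['cates'] = '|'.join('/'.join(tags) for tags in row['cates'])
            writer.writerow(row)

# usage (assuming get_info returns the list of dicts instead of printing):
# save_rows(get_info(get_urls(1, 10)))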
Summary:
1. This exercise pulled together everything from the first week of the course; successfully scraping the 58转转 product listings deepened my understanding of each topic.
2. Write more code and try more scenarios: by repeatedly hitting pitfalls and digging out of them, you learn far more than the tutorials cover (one such pitfall, and a guard against it, is sketched below).
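
One pitfall worth guarding against in the code above: soup.select() returns an empty list when a selector matches nothing (for example on a page whose layout differs from a normal listing), so indexing with [0] raises an IndexError and kills the whole loop. A small helper can make get_info tolerant of missing fields; select_text is purely illustrative and not part of the original code.

def select_text(soup, selector, default=''):
    # return the text of the first match, or a default when the selector finds nothing
    matches = soup.select(selector)
    return matches[0].get_text(strip=True) if matches else default

# e.g. inside get_info:
# price = select_text(soup, 'span.price_now > i', default='N/A')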