二十六、 分布式爬虫– 实战– 链家网全国房源信息爬取(2)
续上例,house.py 示例代码
import scrapy
from ..items import LjItem
import re
import json
class HouseSpider(scrapy.Spider):
    """Crawl nationwide second-hand house (ershoufang) listings from lianjia.com.

    Flow: national city index -> per-city district list -> per-district
    listing pages (with pagination) -> individual house detail pages.
    The partially-built item travels through ``request.meta``.
    """

    name = 'house'
    allowed_domains = ['lianjia.com']
    start_urls = ['https://www.lianjia.com/city/']

    def parse(self, response):
        """Parse the city index page and follow each city's ershoufang section."""
        city_tags = response.css(".city_list_ul a")
        for city_tag in city_tags:
            city_url = city_tag.css("::attr(href)").get()
            city_name = city_tag.css("::text").get()
            item = LjItem(city=city_name)
            # BUG FIX: original read "yieldscrapy.Request" (missing space,
            # a SyntaxError) and passed the callback as a string; Scrapy
            # callbacks must be callables.
            yield scrapy.Request(city_url + "ershoufang/",
                                 callback=self.parse_region_list,
                                 meta={"item": item})

    def parse_region_list(self, response):
        """Parse a city page's district (行政区) filter links."""
        item = response.meta.get('item')
        # BUG FIX: the original selector "div[data-role='ershoufang'].a"
        # matched elements with CSS class "a"; we want anchor descendants
        # of the district filter block.
        region_tags = response.css("div[data-role='ershoufang'] a")
        for region_tag in region_tags:
            region_url = region_tag.css("::attr(href)").get()
            region_name = region_tag.css("::text").get()
            # BUG FIX: build a fresh item per district. The original mutated
            # one shared item, so concurrent requests would all observe the
            # last district name written.
            region_item = LjItem(city=item['city'], region=region_name)
            yield scrapy.Request(response.urljoin(region_url),
                                 callback=self.parse_house_list,
                                 meta={"item": region_item})

    def parse_house_list(self, response):
        """Parse one listing page: follow house detail links, then paginate."""
        item = response.meta.get("item")
        detail_urls = response.css(".sellListContent li>a::attr(href)").getall()
        for detail_url in detail_urls:
            # Only follow genuine detail URLs like /ershoufang/123456.html.
            if re.search(r'/ershoufang/\d+\.html', detail_url):
                # Give each detail request its own copy so parse_house can
                # fill fields without racing other in-flight requests.
                yield scrapy.Request(detail_url,
                                     callback=self.parse_house,
                                     meta={"item": item.copy()})
        # Pagination info is a JSON attribute, e.g. {"totalPage":100,"curPage":1}.
        # Guard against single-page results where the widget is absent.
        page_data = response.css("div[comp-module='page']::attr(page-data)").get()
        if page_data:
            page_info = json.loads(page_data)
            # Fan out only from the first page: the original appended "pgN"
            # to response.url on every page, producing broken ".../pg2pg3"
            # URLs and re-queuing the same pages.
            # NOTE(review): assumes the JSON carries "curPage" — confirm
            # against a live page.
            if page_info.get("curPage", 1) == 1:
                for page_num in range(2, page_info['totalPage'] + 1):
                    # BUG FIX: original passed callback="" and dropped meta,
                    # so paginated pages were never parsed.
                    yield scrapy.Request(response.url + "pg" + str(page_num),
                                         callback=self.parse_house_list,
                                         meta={"item": item})

    def parse_house(self, response):
        """Parse a house detail page.

        NOTE(review): the body is not shown in this excerpt (the article
        continues in the next installment); stubbed with ``pass`` so the
        module stays syntactically valid.
        """
        pass
上一篇文章 第六章 Scrapy框架(二十五) 2020-03-27 地址:
https://www.jianshu.com/p/8dc05993290e
下一篇文章 第六章 Scrapy框架(二十七) 2020-03-29 地址:
https://www.jianshu.com/p/b15d4e242708
以上资料内容来源网络,仅供学习交流,侵删请私信我,谢谢。