27. Distributed Crawler – Hands-On – Scraping Nationwide Housing Listings from Lianjia (3)
pipelines.py code
import json

class LjPipeline(object):
    def __init__(self):
        # Write every item to houses.txt as one JSON object per line (JSON Lines)
        self.fp = open("houses.txt", 'w', encoding='utf-8')

    def process_item(self, item, spider):
        self.fp.write(json.dumps(dict(item), ensure_ascii=False) + "\n")
        return item

    def close_spider(self, spider):
        self.fp.close()
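Because the pipeline writes one JSON object per line, houses.txt can be read back line by line. A minimal sketch of how the output could be consumed (the field names follow the spider shown later in this installment):

import json

# Read the JSON-Lines output produced by LjPipeline
with open("houses.txt", encoding="utf-8") as fp:
    for line in fp:
        house = json.loads(line)
        # e.g. print the city, district and title of every scraped listing
        print(house.get("city"), house.get("region"), house.get("title"))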
settings.py configuration
ITEM_PIPELINES = {
    'lj.pipelines.LjPipeline': 300,
}
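The article only shows the pipeline registration. Other settings that such a crawl usually relies on (robots.txt handling, a browser User-Agent, request throttling) are not reproduced here; the following is purely an assumption about what the rest of settings.py might contain, not part of the original article:

# Companion settings commonly used for this kind of crawl (assumed, not shown above)
ROBOTSTXT_OBEY = False          # otherwise robots.txt rules may block most pages

DEFAULT_REQUEST_HEADERS = {
    # A desktop browser User-Agent; replace with your own if needed
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',
}

DOWNLOAD_DELAY = 1              # throttle requests to be polite to the site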
Create start.py so the spider can be launched from the IDE (equivalent to running scrapy crawl house from the project root):

from scrapy import cmdline

cmdline.execute("scrapy crawl house".split(" "))
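An alternative way to start the spider in-process, for those who prefer not to go through the command-line entry point, is Scrapy's CrawlerProcess API. A minimal sketch, assuming it is run from the project directory so the project's settings can be located:

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

# Load the project's settings.py and run the "house" spider in-process
process = CrawlerProcess(get_project_settings())
process.crawl("house")
process.start()  # blocks until the crawl finishes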
Continuing from the previous installment, sample code for house.py:
import scrapy
from ..items import LjItem
import re
import json

class HouseSpider(scrapy.Spider):
    name = 'house'
    allowed_domains = ['lianjia.com']
    start_urls = ['https://www.lianjia.com/city/']

    def parse(self, response):
        # Parse the city list and follow each city's ershoufang (second-hand housing) section
        city_tags = response.css(".city_list_ul a")
        for city_tag in city_tags:
            city_url = city_tag.css("::attr(href)").get()
            city_name = city_tag.css("::text").get()
            item = LjItem(city=city_name)
            yield scrapy.Request(city_url + "ershoufang/", callback=self.parse_region_list, meta={"item": item})

    def parse_region_list(self, response):
        # Parse the district URLs for the current city
        item = response.meta.get('item')
        region_tags = response.css("div[data-role='ershoufang'] a")
        for region_tag in region_tags:
            region_url = region_tag.css("::attr(href)").get()
            region_name = region_tag.css("::text").get()
            item['region'] = region_name
            yield scrapy.Request(response.urljoin(region_url), callback=self.parse_house_page, meta={"item": item})

    def parse_house_page(self, response):
        # Pagination: read the total page count from the page-data attribute and request every page
        page_data = response.css("div[comp-module='page']::attr(page-data)").get()
        totalPage = json.loads(page_data)['totalPage']
        for x in range(1, totalPage + 1):
            yield scrapy.Request(response.url + "pg" + str(x), callback=self.parse_house_list, meta={"item": response.meta.get('item')})

    def parse_house_list(self, response):
        # Parse the listing page and follow each house's detail URL
        item = response.meta.get("item")
        detail_urls = response.css(".sellListContent li > a::attr(href)").getall()
        for detail_url in detail_urls:
            result = re.search(r'/ershoufang/\d+\.html', detail_url)
            if result:
                yield scrapy.Request(detail_url, callback=self.parse_house, meta={"item": item})

    def parse_house(self, response):
        # Parse the house detail page
        item = response.meta.get('item')
        item['title'] = response.css("h1.main::text").get()
        item['total_price'] = response.css(".price .total::text").get()
        item['unit_price'] = response.css(".unitPriceValue::text").get()
        item['house_type'] = response.css(".content ul li:nth-child(1)::text").get()
        item['orientation'] = response.css(".content ul li:nth-child(7)::text").get()
        item['full_area'] = response.css(".content ul li:nth-child(3)::text").get()
        item['inside_area'] = response.css(".content ul li:nth-child(5)::text").get()
        item['years'] = response.css(".area .subInfo::text").get()
        yield item
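house.py imports LjItem from items.py, which this installment does not show. A minimal sketch of an items.py consistent with the fields assigned above (the field list is an assumption inferred only from the spider code):

import scrapy

class LjItem(scrapy.Item):
    # Fields inferred from the assignments in house.py (assumed definition)
    city = scrapy.Field()
    region = scrapy.Field()
    title = scrapy.Field()
    total_price = scrapy.Field()
    unit_price = scrapy.Field()
    house_type = scrapy.Field()
    orientation = scrapy.Field()
    full_area = scrapy.Field()
    inside_area = scrapy.Field()
    years = scrapy.Field()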
Previous article: Chapter 6, Scrapy Framework (26), 2020-03-28:
https://www.jianshu.com/p/cd9593e50b3a
Next article: Chapter 6, Scrapy Framework (28), 2020-03-30:
https://www.jianshu.com/p/fe9e9f6505c6