Posting the code first; I'll write up the details tomorrow.
Scrapy project structure
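For orientation, the snippets below map onto the standard layout generated by scrapy startproject wawj (newer Scrapy versions also add a middlewares.py; the spider filename here is inferred from the class name, not confirmed by the post):

wawj/
├── scrapy.cfg
└── wawj/
    ├── __init__.py
    ├── items.py           # WawjItem
    ├── pipelines.py       # LianjiaPipeline
    ├── settings.py
    └── spiders/
        ├── __init__.py
        └── wawjspider.py  # WawjspiderSpider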
Item definition (items.py)
import scrapy


class WawjItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    blockurl = scrapy.Field()    # full URL of the community's page
    test = scrapy.Field()        # unused placeholder field
    blockname = scrapy.Field()   # community name
    houseinfo = scrapy.Field()   # house layout of a deal record
    housearea = scrapy.Field()   # floor area
    data = scrapy.Field()        # deal date ("date" was likely intended)
    totalprice = scrapy.Field()  # total deal price
    unitprice = scrapy.Field()   # price per square meter
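A scrapy.Item behaves like a dict: fields are assigned and read with item['field'], and dict(item) converts it for serialization, which is exactly what the pipeline at the bottom relies on. A minimal sketch (the sample value is made up):

item = WawjItem()
item['blockname'] = 'some community'
print(dict(item))  # -> {'blockname': 'some community'}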
The spider
# -*- coding: utf-8 -*-
import scrapy
from wawj.items import WawjItem
from scrapy.http import Request


class WawjspiderSpider(scrapy.Spider):
    name = 'w2spider'
    # district listing pages, plus the city-wide pages p2-p7
    start_urls = ['http://hz.5i5j.com/community/gongshu/p1',
                  'http://hz.5i5j.com/community/xiacheng/p1',
                  'http://hz.5i5j.com/community/shangcheng/p1',
                  'http://hz.5i5j.com/community/binjiang/p1',
                  'http://hz.5i5j.com/community/yuhang/p1',
                  'http://hz.5i5j.com/community/xiaoshan/p1',
                  'http://hz.5i5j.com/community/xihu/p1',
                  'http://hz.5i5j.com/community/jianggan/p1',
                  'http://hz.5i5j.com/community/fuyang/p1',
                  'http://hz.5i5j.com/community/p2/',
                  'http://hz.5i5j.com/community/p3/',
                  'http://hz.5i5j.com/community/p4/',
                  'http://hz.5i5j.com/community/p5/',
                  'http://hz.5i5j.com/community/p6/',
                  'http://hz.5i5j.com/community/p7/']
    # start_urls = ['http://hz.5i5j.com/community/p7/']  # single page, handy for debugging
    base_url = 'http://hz.5i5j.com'
    base_exchang_url = '/exchange/getdeals?communityId='

    def parse(self, response):
        # total number of communities shown in the list header
        maxnum = int(response.xpath("//div[@class='list-comm-l']/h3[@class='list-comm-sort']/font[@class='font-houseNum']/text()").extract()[0])
        # 12 communities per listing page, so take the integer ceiling of maxnum / 12
        if maxnum % 12 == 0:
            maxpage = maxnum // 12
        else:
            maxpage = maxnum // 12 + 1
        for page in range(1, maxpage + 1):
            # 'n<page>' is the suffix the site uses for listing pagination
            url = response.url + 'n' + str(page)
            yield Request(url, callback=self.get_blockid)
    def get_blockid(self, response):
        block_list = response.xpath("//ul[@class='list-body']/li/div[@class='list-info-comm']/h2/a")
        block_num = []
        for block in block_list:
            item = WawjItem()
            block_name = block.xpath("./text()").extract()[0]
            item['blockname'] = block_name
            block_url = block.xpath("./@href").extract()[0]
            # the community id is the second path segment of the relative URL
            block_id = block_url.split('/')[2]
            block_url = self.base_url + block_url
            item['blockurl'] = block_url
            block_num.append(block_url)  # running list of community URLs (not consumed downstream)
            yield Request(block_url, callback=self.get_maxpage_block, dont_filter=True,
                          meta={'item': item, 'id': block_id, 'block_num': block_num})
    def get_maxpage_block(self, response):
        item = response.meta['item']
        block_id = response.meta['id']
        # the second-to-last pager entry holds the last page number of the deal records
        maxpage_block_lst = response.xpath("//ul[@class='deal-page']/a[last()-1]/li/text()").extract()
        if len(maxpage_block_lst) != 0:
            maxpage_block = int(maxpage_block_lst[0])
        else:
            maxpage_block = 1
        for page in range(1, maxpage_block + 1):
            # the deal records are served by an AJAX endpoint, hence the
            # X-Requested-With header on the request
            url = self.base_url + self.base_exchang_url + block_id + '&page=' + str(page)
            yield Request(url, dont_filter=True,
                          headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36',
                                   'X-Requested-With': 'XMLHttpRequest'},
                          callback=self.get_info, meta={'item': item})
    def get_info(self, response):
        print('Sell url: ', response.url)
        item = response.meta['item']
        node_list = response.xpath("//ul[@class='watch-record-text2']")
        if len(node_list) == 0:
            # no deal records for this community: emit one placeholder item
            item['houseinfo'] = 'None'
            item['housearea'] = 'None'
            item['data'] = 'None'
            item['totalprice'] = 'None'
            item['unitprice'] = 'None'
            yield item
        else:
            for node in node_list:
                # copy the base item so each deal record becomes its own item,
                # instead of mutating and re-yielding the same instance
                deal = item.copy()
                deal['houseinfo'] = node.xpath("./li[1]/p[2]/b/text()").extract()[0]
                deal['housearea'] = node.xpath("./li[2]/text()").extract()[0]
                deal['data'] = node.xpath("./li[3]/text()").extract()[0]
                deal['totalprice'] = node.xpath("./li[4]/text()").extract()[0]
                deal['unitprice'] = node.xpath("./li[5]/text()").extract()[0]
                yield deal
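With the spider named w2spider, the crawl is started from the project root with the standard Scrapy CLI. As a side note, Scrapy's built-in feed export could dump the items without any custom pipeline (the output filename below is just an example):

scrapy crawl w2spider
scrapy crawl w2spider -o deals.json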
Pipeline definition (pipelines.py)
import json


class LianjiaPipeline(object):
    def __init__(self):
        # one JSON object per line; adjust the output path to your environment
        self.f = open('c:\\test\\ceshi.json', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        content = json.dumps(dict(item), ensure_ascii=False) + '\n'
        self.f.write(content)
        return item

    def close_spider(self, spider):
        self.f.close()
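One thing the snippet above does not show: the pipeline only runs after it is registered in settings.py. Assuming the project is named wawj, as the imports in the spider suggest, the entry would be:

ITEM_PIPELINES = {
    'wawj.pipelines.LianjiaPipeline': 300,
}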