# -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 19:21:38 2017
@author: night
"""
from bs4 import BeautifulSoup
import requests
import json
import re
import time
import pandas as pd
# Start URLs: one per Hangzhou district
starturl_list = ['https://hz.lianjia.com/ershoufang/jianggan/',
                 'https://hz.lianjia.com/ershoufang/xihu/',
                 'https://hz.lianjia.com/ershoufang/xiacheng/',
                 'https://hz.lianjia.com/ershoufang/gongshu/',
                 'https://hz.lianjia.com/ershoufang/shangcheng/',
                 'https://hz.lianjia.com/ershoufang/binjiang/',
                 'https://hz.lianjia.com/ershoufang/yuhang/',
                 'https://hz.lianjia.com/ershoufang/xiaoshan/',
                 'https://hz.lianjia.com/ershoufang/xiasha/']
# One-off exploration: the pager div exposes the maximum page count in a
# 'page-data' JSON attribute.
#request = requests.get('https://hz.lianjia.com/ershoufang/xiasha/')
#soup = BeautifulSoup(request.text,'html.parser')
#num = json.loads(soup.find('div',{'class':"page-box house-lst-page-box"}).get('page-data'))['totalPage']
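# For reference, the page-data attribute holds a small JSON blob roughly like
# the following (field set is illustrative; only 'totalPage' is used here):
#   <div class="page-box house-lst-page-box" page-data='{"totalPage":100,"curPage":1}'>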
# Build the URL of every listing page in a district
def get_pageurls(url):
    # Read totalPage from the pager's page-data attribute, then append the
    # pg2/, pg3/, ... page URLs to the module-level pageurls_list
    request = requests.get(url)
    soup = BeautifulSoup(request.text, 'html.parser')
    totalnum = json.loads(soup.find('div', {'class': "page-box house-lst-page-box"}).get('page-data'))['totalPage'] + 1
    pageurls_list.append(url)    # page 1 is the bare district URL
    for num in range(2, totalnum):
        newurl = url + 'pg{}/'.format(num)
        pageurls_list.append(newurl)
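# Illustrative result (the actual totalPage varies per district):
#   get_pageurls('https://hz.lianjia.com/ershoufang/xiasha/')
#   pageurls_list == ['https://hz.lianjia.com/ershoufang/xiasha/',
#                     'https://hz.lianjia.com/ershoufang/xiasha/pg2/',
#                     'https://hz.lianjia.com/ershoufang/xiasha/pg3/', ...]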
# Collect each house's detail-page URL from one listing page
def get_eachurls(pageurl):
    request = requests.get(pageurl)
    soup = BeautifulSoup(request.text, 'html.parser')
    for i in soup.find_all('li', {'class': 'clear'}):
        # the first <a> inside each <li class="clear"> links to the detail page
        eachurl_list.append(i.a.get('href'))
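# Listings can repeat across pages; if duplicates matter, a set-based variant
# is a small change (a sketch, not what the script below relies on):
#   seen = set()
#   for i in soup.find_all('li', {'class': 'clear'}):
#       href = i.a.get('href')
#       if href not in seen:
#           seen.add(href)
#           eachurl_list.append(href)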
# Scrape the details of a single house
allinfo = []
def houseinformation(houseurl):
    global allinfo
    request = requests.get(houseurl)
    soup = BeautifulSoup(request.text, 'html.parser')
    info = {}
    # Coordinates sit in an inline script as resblockPosition:'<lng>,<lat>';
    # searching the raw page text is sturdier than indexing a fixed <script>
    # tag, whose position can shift with the page layout
    try:
        pos = re.search(r"resblockPosition:'(.*?)'", request.text).group(1)
        info[u'经度'] = pos.split(',')[0]
        info[u'纬度'] = pos.split(',')[1]
    except:
        info[u'经度'] = None
        info[u'纬度'] = None
    # Title, subtitle, total price, unit price
    try:
        info[u'标题'] = unicode(soup.find('div', {'class': 'title'}).contents[1].get('title'))
    except:
        info[u'标题'] = None
    try:
        info[u'副标题'] = unicode(soup.find('div', {'class': 'title'}).contents[3].string)
    except:
        info[u'副标题'] = None
    try:
        price = soup.find('div', {'class': 'price'})
        info[u'总价'] = price.find('span', {'class': 'total'}).string + price.find('span', {'class': 'unit'}).string
    except:
        info[u'总价'] = None
    try:
        info[u'单价'] = soup.find('span', {'class': 'unitPriceValue'}).get_text()
    except:
        info[u'单价'] = None
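    # These stay as display strings, e.g. u'300万' for 总价 and u'30000元/平米'
    # for 单价 (values illustrative); strip the units later in pandas if you
    # need them as numbers.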
    # Each attribute <li> holds the label in contents[0] and the value in
    # contents[1]; a missing field raises and is recorded as None
    def grab(items, idx):
        try:
            return unicode(items[idx].contents[1].string)
        except:
            return None
    # Basic attributes
    base = soup.find('div', {'class': 'introContent'}).contents[1].ul.find_all('li')
    base_keys = [u'房屋类型', u'所在楼层', u'建筑面积', u'户型结构', u'套内面积',
                 u'建筑类型', u'房屋朝向', u'建筑结构', u'配备电梯']
    for idx, key in enumerate(base_keys):
        info[key] = grab(base, idx)
    # Transaction attributes
    trans = soup.find('div', {'class': 'introContent'}).contents[3].ul.find_all('li')
    trans_keys = [u'挂牌时间', u'交易属性', u'上次交易', u'房屋用途', u'房屋年限',
                  u'产权所属', u'抵押信息', u'房本备件', u'房源编码']
    for idx, key in enumerate(trans_keys):
        info[key] = grab(trans, idx)
    # Community name, taken from the same inline script
    try:
        info[u'小区名称'] = re.search("resblockName:'(.*?)'", request.text).group(1)
    except:
        info[u'小区名称'] = None
    info[u'网址'] = houseurl
    allinfo.append(info)
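# Quick single-listing check (the URL shape is illustrative; real detail pages
# look like https://hz.lianjia.com/ershoufang/<listing id>.html):
#   houseinformation('https://hz.lianjia.com/ershoufang/<listing id>.html')
#   print allinfo[-1][u'标题'], allinfo[-1][u'总价']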
#########################################################################
pageurls_list = []
eachurl_list = []
get_pageurls(starturl_list[7])    # index 7 = xiaoshan; change the index to crawl another district
n = 1
for i in pageurls_list:
    get_eachurls(i)
    print 'Stored house URLs from listing page {}'.format(n)
    n += 1
for i in range(len(eachurl_list)):
    houseinformation(eachurl_list[i])
    print u'Scraped record {}, listing title: {}'.format(i + 1, allinfo[i][u'标题'])
#    time.sleep(0.5)    # uncomment to pause between requests
df = pd.DataFrame(allinfo)
# Output file; rename it to match the district being crawled
df.to_csv(r"C:\test\data_lianjia_shangcheng.csv", encoding='gb18030')
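# The bare requests.get calls above send the library's default User-Agent with
# no delay. If Lianjia starts answering with captcha or empty pages, a shared
# session with a browser-like header plus the commented time.sleep is the usual
# first fix (a sketch; the header string is illustrative, not a requirement):
#   session = requests.Session()
#   session.headers.update({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'})
#   request = session.get(url)    # then reuse 'session' inside the functions above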