Since no discussion of Python crawlers gets far without the Scrapy framework, this time I rewrote my earlier Lagou crawler on top of Scrapy. Thanks to Mr_Cxy for the code this builds on.
Plain version: Python crawler homework | Scraping Lagou job listings
Project layout
Straight to the code.
IDE debug entry point
entrypoint.py
# -*- coding: utf-8 -*-
from scrapy.cmdline import execute
execute(['scrapy', 'crawl', 'lagouscrapy'])
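Running python entrypoint.py (or launching this file under the IDE's debugger) is the same as running scrapy crawl lagouscrapy from the project root, so breakpoints set inside the spider actually trigger.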
items.py
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class LagouscrapyItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    positionName = scrapy.Field()
    companyFullName = scrapy.Field()
    workYear = scrapy.Field()
    salary = scrapy.Field()
    district = scrapy.Field()
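Note that parse_json in the spider further down yields plain dicts, which Scrapy accepts just as well; if you want the misspelled-key protection that scrapy.Item gives you, the same loop can populate LagouscrapyItem instead. A minimal sketch of that variant:

from lagouscrapy.items import LagouscrapyItem

def parse_json(self, response):
    json_data = json.loads(response.text)
    for position in json_data["content"]["positionResult"]["result"]:
        item = LagouscrapyItem()
        item['positionName'] = position['positionName']   # assigning an undeclared field raises KeyError
        item['companyFullName'] = position['companyFullName']
        item['workYear'] = position['workYear']
        item['salary'] = position['salary']
        item['district'] = position['city'] + (position['district'] or '')
        yield item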
middlewares.py (generated by scrapy startproject, left unchanged)
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class LagouscrapySpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
pipelines.py
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import MySQLdb
class LagouscrapyPipeline(object):
    def process_item(self, item, spider):
        # Opens a fresh connection for every item; fine for a small crawl,
        # see the pooled variant below for anything bigger.
        conn = MySQLdb.Connection(host='172.16.110.163', user='lagou',
                                  passwd='', db='lagou', charset='utf8')
        cursor = conn.cursor()
        cursor.execute(
            "insert into jobinfo(positionname, companyfullname, workyear, salary, district) "
            "values (%s, %s, %s, %s, %s)",
            (item['positionName'], item['companyFullName'], item['workYear'],
             item['salary'], item['district']))
        conn.commit()
        cursor.close()
        conn.close()
        return item
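Opening and tearing down a MySQL connection per item gets expensive once the crawl grows. A sketch of the same pipeline holding one connection for the whole run, via Scrapy's open_spider/close_spider hooks (same host and credentials as above, which are specific to my setup):

import MySQLdb

class LagouscrapyPooledPipeline(object):
    # Variant that reuses a single connection for the whole crawl.

    def open_spider(self, spider):
        self.conn = MySQLdb.Connection(host='172.16.110.163', user='lagou',
                                       passwd='', db='lagou', charset='utf8')
        self.cursor = self.conn.cursor()

    def close_spider(self, spider):
        self.cursor.close()
        self.conn.close()

    def process_item(self, item, spider):
        self.cursor.execute(
            "insert into jobinfo(positionname, companyfullname, workyear, salary, district) "
            "values (%s, %s, %s, %s, %s)",
            (item['positionName'], item['companyFullName'], item['workYear'],
             item['salary'], item['district']))
        self.conn.commit()
        return item

To use it, point the ITEM_PIPELINES entry in settings.py at this class instead of the per-item version.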
settings.py
# -*- coding: utf-8 -*-
# Scrapy settings for lagouscrapy project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'lagouscrapy'
SPIDER_MODULES = ['lagouscrapy.spiders']
NEWSPIDER_MODULE = 'lagouscrapy.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'lagouscrapy.middlewares.LagouscrapySpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'lagouscrapy.middlewares.ProxyMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'lagouscrapy.pipelines.LagouscrapyPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
scrapy.cfg
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.org/en/latest/deploy.html
[settings]
default = lagouscrapy.settings
[deploy]
#url = http://localhost:6800/
project = lagouscrapy
lagouscrapy.py
# -*- coding: utf-8 -*-
import sys
import json
import requests
import scrapy
from lxml import etree
from scrapy.http import FormRequest
# Python 2 workaround: make implicit str/unicode conversion default to utf-8
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append('..')
class lagouscrapy(scrapy.Spider):
    name = 'lagouscrapy'
    city_list = ['北京', '上海', '广州', '深圳', '青岛', '杭州']
    position_list = ['Python工程师', '大数据', '云计算', 'docker', '中间件', 'Node.js', '数据挖掘',
                     '自然语言处理', '搜索算法', '精准推荐', '全栈工程师', '图像处理', '机器学习', '语音识别']
    base_url = 'https://www.lagou.com/jobs/list_%s?px=default&city=%s'
    # Cookie values are kept as strings so they serialize cleanly
    cookies = {
        'user_trace_token': '20170426104536-43eecce9f0d84710a63c6cfc34534d3d',
        'LGUID': '20170426104536-6d56bd01-2a2a-11e7-beac-525400f775ce',
        'showExpriedIndex': '1',
        'showExpriedCompanyHome': '1',
        'showExpriedMyPublish': '1',
        'hasDeliver': '80',
        'index_location_city': '%E6%9D%AD%E5%B7%9E',
        'JSESSIONID': 'D4E8E92335902847EAFC290EECC581CB',
        'TG-TRACK-CODE': 'search_code',
        'SEARCH_ID': 'b642e683bb424e7f8622b0c6a17ffeeb',
        'Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6': '1493177918,1493192358,1493867575,1494227146',
        'Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6': '1494310035',
        '_ga': 'GA1.2.605883278.1493174738',
        'LGSID': '20170509140720-c2ed1f62-347d-11e7-bcb7-525400f775ce',
        'LGRID': '20170509140720-c2ed2205-347d-11e7-bcb7-525400f775ce',
        '_putrc': ''
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
        'Cookie': 'user_trace_token=20170426104536-43eecce9f0d84710a63c6cfc34534d3d; LGUID=20170426104536-6d56bd01-2a2a-11e7-beac-525400f775ce; index_location_city=%E6%9D%AD%E5%B7%9E; showExpriedIndex=1; showExpriedCompanyHome=1; showExpriedMyPublish=1; hasDeliver=80; login=false; unick=""; _putrc=""; TG-TRACK-CODE=search_code; JSESSIONID=D4E8E92335902847EAFC290EECC581CB; _gid=GA1.2.167447017.1494310035; _gat=1; _ga=GA1.2.605883278.1493174738; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1493177918,1493192358,1493867575,1494227146; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1494310035; LGSID=20170509140720-c2ed1f62-347d-11e7-bcb7-525400f775ce; PRE_UTM=; PRE_HOST=; PRE_SITE=; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2Fjobs%2Flist_python%25E5%25B7%25A5%25E7%25A8%258B%25E5%25B8%2588%3Fcity%3D%25E6%259D%25AD%25E5%25B7%259E; LGRID=20170509140720-c2ed2205-347d-11e7-bcb7-525400f775ce; SEARCH_ID=403ecbaea5d24a2a9abec368e74a8984',
        'Host': 'www.lagou.com',
        'Origin': 'https://www.lagou.com'
    }
    def get_complete_list(self):
        # Hit the HTML search page once per (position, city) pair to read
        # the total page count before firing the Ajax requests.
        complete_list = []
        for position in self.position_list:
            position = position.decode('utf-8')
            for city in self.city_list:
                city = city.decode('utf-8')
                url = self.base_url % (position, city)
                html = requests.get(url, headers=self.headers).content
                selector = etree.HTML(html)
                # the page-count span inside the pager widget
                total_page = selector.xpath('//ul[@id="order"]/li/div[4]/div[3]/span[2]/text()')[0]
                info = {'city': city, 'position': position, 'total_page': total_page}
                complete_list.append(info)
        print complete_list
        return complete_list
    def start_requests(self):
        complete_list = self.get_complete_list()
        json_url = 'https://www.lagou.com/jobs/positionAjax.json?px=default&city=%s&needAddtionalResult=false'
        for info in complete_list:
            city = info['city']
            position = info['position']
            total_page = int(info['total_page'])
            url = json_url % city
            for page in range(1, total_page + 1):
                post_data = {
                    'first': 'true',
                    'pn': str(page),
                    'kd': position
                }
                yield FormRequest(url, formdata=post_data, cookies=self.cookies, callback=self.parse_json)
    def parse_json(self, response):
        # response.text is already unicode, so no extra decode is needed
        json_data = json.loads(response.text)
        position_data = json_data["content"]["positionResult"]['result']
        for position in position_data:
            item = {}
            item['positionName'] = position['positionName']
            item['companyFullName'] = position['companyFullName']
            item['workYear'] = position['workYear']
            item['salary'] = position['salary']
            if position['district'] is not None:
                item['district'] = position['city'] + position['district']
            else:
                item['district'] = position['city']
            yield item
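With everything in place, the crawl runs through entrypoint.py or from the shell. Scrapy's feed exports can also write the items to a file, which is handy for checking results without a database, for example:

scrapy crawl lagouscrapy -o jobs.json

The -o flag adds a feed export on top of the MySQL pipeline rather than replacing it.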
Results loaded into the database
Still to do:
Use proxies via middlewares.py to raise crawl throughput and the amount of data collected; a sketch of what that could look like follows.
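A minimal downloader-middleware sketch for this; the class name and proxy addresses are placeholders, not tested values. Scrapy's built-in HttpProxyMiddleware honors request.meta['proxy'], so setting it per request is all that is needed:

# middlewares.py -- hypothetical addition, not part of the generated template
import random

class ProxyMiddleware(object):
    # Assumed pool of HTTP proxies; replace with real, working addresses.
    PROXY_LIST = [
        'http://127.0.0.1:8080',
        'http://127.0.0.1:8081',
    ]

    def process_request(self, request, spider):
        # Route every outgoing request through a randomly chosen proxy.
        request.meta['proxy'] = random.choice(self.PROXY_LIST)

It would be enabled through the DOWNLOADER_MIDDLEWARES block already stubbed out in settings.py ('lagouscrapy.middlewares.ProxyMiddleware': 543).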
References:
scrapy-chinese
https://stevenzhao.gitbooks.io/scrapy-chinese/content/index.html
Scrapy 1.3 documentation
https://doc.scrapy.org/en/latest/