1. User-Agent
Add the following to middlewares.py (the class picks a random User-Agent for every request, so random must be imported at the top of the file):
import random

class my_useragent(object):
    def process_request(self, request, spider):
        useragent = [
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36",
            "Opera/9.80 (X11; Linux i686; Ubuntu/14.10) Presto/2.12.388 Version/12.16",
            "Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14",
            "Mozilla/5.0 (Windows NT 6.0; rv:2.0) Gecko/20100101 Firefox/4.0 Opera 12.14",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14",
            "Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02",
            "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1",
            "Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0",
            "Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0",
            "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20130401 Firefox/31.0",
            "Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0",
            "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20120101 Firefox/29.0",
            "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/29.0",
            "Mozilla/5.0 (X11; OpenBSD amd64; rv:28.0) Gecko/20100101 Firefox/28.0",
            "Mozilla/5.0 (X11; Linux x86_64; rv:28.0) Gecko/20100101 Firefox/28.0",
            "Mozilla/5.0 (Windows NT 6.1; rv:27.3) Gecko/20130101 Firefox/27.3",
            "Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:27.0) Gecko/20121011 Firefox/27.0",
            "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0",
            "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko",
            "Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko",
            "Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0",
            "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 7.0; InfoPath.3; .NET CLR 3.1.40767; Trident/6.0; en-IN)",
            "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
            "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)",
            "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)",
            "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)",
            "Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)",
        ]
        # Pick one UA string at random for this request
        request.headers['User-Agent'] = random.choice(useragent)
        # request.headers['Referer'] = 'https://www.google.com'
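To check that the rotation actually works, a throwaway spider like the one below can be run once the middleware is enabled (a sketch; the spider name and request count are arbitrary, and httpbin.org simply echoes back the User-Agent header it received):

import scrapy

class UACheckSpider(scrapy.Spider):
    name = 'ua_check'

    def start_requests(self):
        # dont_filter allows the same URL to be fetched several times,
        # so several different User-Agent strings should appear in the log
        for _ in range(3):
            yield scrapy.Request('https://httpbin.org/user-agent', dont_filter=True)

    def parse(self, response):
        self.logger.info(response.text)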
2. Proxy IP
Add the proxy middleware to the same middlewares.py (base64 is needed for the Basic auth header):

import base64

class my_proxy(object):
    def process_request(self, request, spider):
        # Route every request through the Abuyun tunnel proxy;
        # the proxy URL should include the http:// scheme
        request.meta["proxy"] = "http://proxy.abuyun.com:9020"
        # Placeholder credentials; Proxy-Authorization is "Basic " + base64("user:password")
        proxy_name_pass = b'xxx:xxx'
        encode_pass_name = base64.b64encode(proxy_name_pass)
        request.headers["Proxy-Authorization"] = 'Basic ' + encode_pass_name.decode()
Then enable both middlewares in settings.py (lower numbers run earlier in process_request):

DOWNLOADER_MIDDLEWARES = {
    # 'placesmap.middlewares.PlacesmapDownloaderMiddleware': 543,
    'placesmap.middlewares.my_useragent': 299,
    'placesmap.middlewares.my_proxy': 298,
}
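Optionally, Scrapy's built-in UserAgentMiddleware can be switched off by adding one more entry to the same dict (a hedged suggestion, not strictly required, since the default middleware only fills in a User-Agent when none is set):

DOWNLOADER_MIDDLEWARES = {
    'placesmap.middlewares.my_useragent': 299,
    'placesmap.middlewares.my_proxy': 298,
    # Setting a middleware to None disables it
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
}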
3. Image download
Step 1: get the image URL in the spider. urljoin turns a relative src into an absolute URL (parse comes from urllib); a sketch of a full parse callback follows below.

item['image'] = parse.urljoin(response.url, img)
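For context, here is a minimal sketch of how that line sits inside a spider callback (the XPath expressions and the import path are placeholders for whatever the real project uses; DoubanItem is the item defined later in this post):

from urllib import parse
import scrapy
from ..items import DoubanItem  # hypothetical relative import

class PlaceSpider(scrapy.Spider):
    name = 'place'
    start_urls = ['https://example.com/']

    def parse(self, response):
        item = DoubanItem()
        item['name'] = response.xpath('//h1/text()').get(default='').strip()
        item['des'] = response.xpath('//p[@class="intro"]/text()').get(default='')
        img = response.xpath('//img/@src').get()
        # Relative src values become absolute URLs before the image pipeline sees them
        item['image'] = parse.urljoin(response.url, img) if img else ''
        yield item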
Step 2: define the storage location with IMAGES_STORE in settings.py:

import os

project_dir = os.path.abspath(os.path.dirname(__file__))
IMAGES_STORE = os.path.join(project_dir, 'images')
Step 3: write the ImagesPipeline subclass. It defines the download path (storage location) and the file name of each image. Note that ImagesPipeline needs Pillow installed, and slugify here comes from a package such as python-slugify.
import scrapy
from scrapy.pipelines.images import ImagesPipeline
from slugify import slugify

class MyImagePipeline(ImagesPipeline):
    def get_media_requests(self, item, info):
        # Only request a download when the item actually has an image URL;
        # pass the item along in meta so file_path can use its fields
        if item['image']:
            yield scrapy.Request(item['image'], meta={'item': item})

    def file_path(self, request, response=None, info=None):
        # Build "<category>/<slugified name>.jpg", relative to IMAGES_STORE
        name = slugify(request.meta['item']['name'])
        cate_name = 'state'
        # child_cate_name = 'subcategory'
        # ss = random.choice([i for i in range(1, 50000)])
        path1 = r'%s' % (cate_name)
        path = r'{}/{}.{}'.format(path1, name, 'jpg')
        return path

    def item_completed(self, results, item, info):
        image_path = [x['path'] for ok, x in results if ok]
        if not image_path:
            item['image'] = ''  # leave the field empty when nothing was downloaded
            # raise DropItem('Item contains no images')
        else:
            item['image'] = image_path[0]
        return item
Finally, add the pipeline configuration to settings.py. Lower numbers run first, so the image pipeline (priority 1) rewrites item['image'] to the local file path before the MySQL pipeline (priority 301) inserts the item.
ITEM_PIPELINES = {
    # 'place.pipelines.PlacesmapPipeline': 300,
    # asynchronous storage mode
    'place.pipelines.MysqlTwistedPipline': 301,
    # synchronous storage mode
    # 'place.pipelines.MysqlPipeline': 300,
    'place.pipelines.MyImagePipeline': 1,
}
4. MySQL storage
I. Asynchronous mode
Add the MySQL connection settings to settings.py:
MYSQL_HOST = '127.0.0.1'
MYSQL_DB = 'place'
MYSQL_USER = 'root'
MYSQL_PASSWORD = 'root'
Then add the following code to pipelines.py:
from twisted.enterprise import adbapi
import MySQLdb
import MySQLdb.cursors

class MysqlTwistedPipline(object):
    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        dbparms = dict(
            host=settings["MYSQL_HOST"],
            db=settings["MYSQL_DB"],
            user=settings["MYSQL_USER"],
            passwd=settings["MYSQL_PASSWORD"],
            charset='utf8',
            cursorclass=MySQLdb.cursors.DictCursor,
            use_unicode=True,
        )
        # Twisted's adbapi wraps the blocking MySQLdb driver in a thread pool
        dbpool = adbapi.ConnectionPool("MySQLdb", **dbparms)
        return cls(dbpool)

    def process_item(self, item, spider):
        # Use Twisted to turn the MySQL insert into an asynchronous call
        query = self.dbpool.runInteraction(self.do_insert, item)
        query.addErrback(self.handle_error, item, spider)  # handle exceptions
        return item

    def handle_error(self, failure, item, spider):
        # Handle exceptions raised by the asynchronous insert
        print(failure)

    def do_insert(self, cursor, item):
        # Perform the actual insert: each item builds its own SQL statement
        insert_sql, params = item.get_insert_sql()
        cursor.execute(insert_sql, params)
Add the following to items.py:
import scrapy

class DoubanItem(scrapy.Item):
    # define the fields for your item here like:
    name = scrapy.Field()
    image = scrapy.Field()
    des = scrapy.Field()

    def get_insert_sql(self):
        insert_sql = """
            INSERT INTO `place` (name, image, des)
            VALUES (%s, %s, %s)
            ON DUPLICATE KEY UPDATE des=VALUES(des), image=VALUES(image)
        """
        params = (
            self['name'], self['image'], self['des'],
        )
        return insert_sql, params
With this approach, supporting another item type only requires editing items.py; the pipeline code stays the same. A sketch of a second item class follows below.
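For example, a second item type (a sketch that reuses the us_street columns appearing in the synchronous pipeline below) only has to define its own fields and its own get_insert_sql; MysqlTwistedPipline itself is untouched:

class UsStreetItem(scrapy.Item):
    name = scrapy.Field()
    zipcode_id = scrapy.Field()
    slug = scrapy.Field()
    place_id = scrapy.Field()

    def get_insert_sql(self):
        insert_sql = """
            INSERT INTO `us_street` (name, zipcode_id, slug, place_id)
            VALUES (%s, %s, %s, %s)
        """
        params = (self['name'], self['zipcode_id'], self['slug'], self['place_id'])
        return insert_sql, params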
II. Synchronous mode
This approach is a bit more widely applicable: for example, when looping over a parsed JSON file, the asynchronous version above can run into duplicate data, whereas this mode does not fail on it.
In pipelines.py, import

from scrapy.utils.project import get_project_settings

and write the pipeline as follows:
class MysqlPipeline(object):
    # Write to MySQL synchronously
    def __init__(self):
        settings = get_project_settings()
        self.conn = MySQLdb.connect(
            settings["MYSQL_HOST"], settings["MYSQL_USER"],
            settings["MYSQL_PASSWORD"], settings["MYSQL_DB"],
            charset="utf8mb4", use_unicode=True,
        )
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        insert_sql = 'INSERT INTO `us_street` (name, zipcode_id, slug, place_id) VALUES (%s, %s, %s, %s)'
        value = (item['name'], item['zipcode_id'], item['slug'], item['place_id'])
        self.cursor.execute(insert_sql, value)
        self.conn.commit()
        return item
In synchronous mode, process_item can likewise be simplified to reuse the item's get_insert_sql:

    def __init__(self):
        settings = get_project_settings()
        self.conn = MySQLdb.connect(
            settings["MYSQL_HOST"], settings["MYSQL_USER"],
            settings["MYSQL_PASSWORD"], settings["MYSQL_DB"],
            charset="utf8mb4", use_unicode=True,
        )
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        insert_sql, params = item.get_insert_sql()
        self.cursor.execute(insert_sql, params)
        self.conn.commit()
        return item
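One optional addition (not part of the original code): close the cursor and connection when the crawl finishes, using the standard close_spider hook on the same pipeline class:

    def close_spider(self, spider):
        # Called once when the spider is closed
        self.cursor.close()
        self.conn.close()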