Create a new proxies.py file in your Scrapy project and run it once; it fetches proxy IPs and saves them to a txt file:
# -*- coding: utf-8 -*-
import random
from multiprocessing import Process, Queue

import requests
from bs4 import BeautifulSoup


class Proxies(object):
    """Fetch proxy IPs from xicidaili.com and verify that they work."""

    def __init__(self, page=3):
        self.proxies = []
        self.verify_pro = []
        self.page = page
        self.headers = {
            'Accept': '*/*',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'zh-CN,zh;q=0.8'
        }
        self.get_proxies()
        self.get_proxies_nn()

    def get_proxies(self):
        """Scrape the /nt (ordinary proxy) listing pages."""
        page = random.randint(1, 10)
        page_stop = page + self.page
        while page < page_stop:
            url = 'http://www.xicidaili.com/nt/%d' % page
            html = requests.get(url, headers=self.headers).content
            soup = BeautifulSoup(html, 'lxml')
            ip_list = soup.find(id='ip_list')
            for odd in ip_list.find_all(class_='odd'):
                protocol = odd.find_all('td')[5].get_text().lower() + '://'
                self.proxies.append(protocol + ':'.join([x.get_text() for x in odd.find_all('td')[1:3]]))
            page += 1

    def get_proxies_nn(self):
        """Scrape the /nn (high-anonymity proxy) listing pages."""
        page = random.randint(1, 10)
        page_stop = page + self.page
        while page < page_stop:
            url = 'http://www.xicidaili.com/nn/%d' % page
            html = requests.get(url, headers=self.headers).content
            soup = BeautifulSoup(html, 'lxml')
            ip_list = soup.find(id='ip_list')
            for odd in ip_list.find_all(class_='odd'):
                protocol = odd.find_all('td')[5].get_text().lower() + '://'
                self.proxies.append(protocol + ':'.join([x.get_text() for x in odd.find_all('td')[1:3]]))
            page += 1

    def verify_proxies(self):
        # proxies that have not been verified yet
        old_queue = Queue()
        # proxies that passed verification
        new_queue = Queue()
        print('verify proxy........')
        works = []
        for _ in range(15):
            works.append(Process(target=self.verify_one_proxy, args=(old_queue, new_queue)))
        for work in works:
            work.start()
        for proxy in self.proxies:
            old_queue.put(proxy)
        for work in works:
            old_queue.put(0)
        for work in works:
            work.join()
        self.proxies = []
        while 1:
            try:
                self.proxies.append(new_queue.get(timeout=1))
            except Exception:
                break
        print('verify_proxies done!')

    def verify_one_proxy(self, old_queue, new_queue):
        while 1:
            proxy = old_queue.get()
            if proxy == 0:
                break
            protocol = 'https' if 'https' in proxy else 'http'
            proxies = {protocol: proxy}
            try:
                if requests.get('http://www.baidu.com', proxies=proxies, timeout=2).status_code == 200:
                    print('success %s' % proxy)
                    new_queue.put(proxy)
            except Exception:
                print('fail %s' % proxy)


if __name__ == '__main__':
    a = Proxies()
    a.verify_proxies()
    print(a.proxies)
    proxies = a.proxies
    with open('proxies.txt', 'a') as f:
        for proxy in proxies:
            f.write(proxy + '\n')
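Each line written to proxies.txt has the form protocol://ip:port, which is exactly what the middleware below expects. As a quick sanity check (a minimal sketch, not part of the original post; it assumes proxies.txt sits in the current working directory), you can read the file back and pick a random entry the same way the middleware will:

import random

with open('proxies.txt', 'r') as f:
    # keep non-empty lines only, stripping the trailing newline
    proxies = [line.strip() for line in f if line.strip()]

if proxies:
    print(random.choice(proxies))  # e.g. 'http://123.56.74.13:8080' (made-up address)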
Next, change the proxy middleware file middlewares.py to the following (in the with open() call, replace the path with the location of the txt file generated in your project):
import random
import time


class ProxyMiddleWare(object):

    def process_request(self, request, spider):
        '''Attach a proxy to the outgoing request.'''
        proxy = self.get_random_proxy()
        print("this is request ip:" + proxy)
        request.meta['proxy'] = proxy

    def process_response(self, request, response, spider):
        '''Handle the returned response.'''
        # If the response status is not 200, re-issue the current request
        if response.status != 200:
            proxy = self.get_random_proxy()
            print("this is response ip:" + proxy)
            # attach a new proxy to the current request
            request.meta['proxy'] = proxy
            return request
        return response

    def get_random_proxy(self):
        '''Pick a random proxy from the file.'''
        while 1:
            # replace the path below with the txt file generated in your project
            with open('path/to/your/proxies.txt', 'r') as f:
                proxies = f.readlines()
            if proxies:
                break
            else:
                time.sleep(1)
        proxy = random.choice(proxies).strip()
        return proxy
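Because many of these free proxies fail outright (timeouts, refused connections) rather than returning a non-200 status, an optional addition that is not in the original post is a process_exception hook on the same ProxyMiddleWare class, which swaps in a fresh proxy and retries the request. The signature is the standard Scrapy downloader-middleware hook; treat this as a sketch:

    def process_exception(self, request, exception, spider):
        '''On a download error (timeout, connection refused, ...), retry the
        request with a different proxy. Sketch only; reuses get_random_proxy()
        defined above.'''
        proxy = self.get_random_proxy()
        print("exception, retrying with ip:" + proxy)
        request.meta['proxy'] = proxy
        # mark as dont_filter so the scheduler's dupefilter does not drop
        # the re-issued request
        request.dont_filter = True
        return request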
Finally, modify the settings.py file, where myproxies is your project name:
DOWNLOADER_MIDDLEWARES = {
    # 'myproxies.middlewares.MyCustomDownloaderMiddleware': 543,
    'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
    'myproxies.middlewares.ProxyMiddleWare': 125,
    'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': None,
}
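Optionally (an addition not in the original post), you can also tighten the download timeout and retry behaviour in the same settings.py so that a dead proxy fails fast instead of stalling the crawl; the values below are only illustrative:

DOWNLOAD_TIMEOUT = 10   # give up on a slow proxy after 10 seconds
RETRY_ENABLED = True    # let the built-in RetryMiddleware retry failed downloads
RETRY_TIMES = 3         # at most 3 retries per request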
Personal take: this approach scrapes the Xici (xicidaili) free proxy list, and many of those proxy IPs are not particularly stable, so in practice it does not help the Scrapy crawl all that much.
Reference: https://blog.csdn.net/weixin_40475396/article/details/78241238