Many crawler beginners probably start out by scraping pictures of pretty girls, and as a beginner myself I was no different. I had found plenty of crawler code online, and the crawler described below is adjusted from one of those earlier efforts by other users. Here is a detailed walkthrough of it.
Here are the results my crawler produced; feel free to take a look.
I. Confirm the crawl target
Meizitu: http://www.mzitu.com/all. Most readers probably know it already. It is a nice perk: you get to learn something and rest your eyes at the same time, so why not?
II. Outline the approach
1. Get the detail page of each photo set, i.e. follow each gallery's URL.
2. Read the page count from each detail page.
3. Temporarily store the path from the src attribute of the image on every page.
4. Use multiple processes to download the stored images. Done: roughly ten minutes is enough for a month's worth of galleries, much faster than the earlier single-process version. A minimal sketch of this flow follows the list.
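The sketch below only shows the shape of that collect-then-download flow. The function names collect_image_urls and download, and the sample tuple, are placeholders of mine; the real implementation is in section III.

import multiprocessing

def collect_image_urls():
    # Phase 1 (single process): walk archive -> gallery -> page and
    # return (img_url, folder, referer) tuples. Placeholder data here.
    return [("http://example.com/a.jpg", "2018_10_demo", "http://example.com/gallery/1")]

def download(img_url, folder, referer):
    # Phase 2 (worker process): fetch one image, sending the gallery page
    # as Referer, and write it to disk. Just a stub in this sketch.
    print("would download", img_url, "into", folder, "with Referer", referer)

if __name__ == "__main__":
    tasks = collect_image_urls()
    pool = multiprocessing.Pool(8)  # pool of workers for the slow downloads
    for img_url, folder, referer in tasks:
        pool.apply_async(download, args=(img_url, folder, referer))
    pool.close()
    pool.join()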
III. The code in detail
The code falls into two parts:
1. Collecting the image URLs.
2. Downloading the collected URLs with multiple processes.
All HTTP requests use a randomly chosen User-Agent header and an explicit Referer parameter to work around the site's anti-crawler checks and make the crawler more robust; a small standalone illustration follows.
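As a rough illustration of that trick (the two User-Agent strings and the URLs here are arbitrary examples of mine; the crawler's own version is the requestpic function in utils.py below):

import random
import requests

USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36",
]

def fetch(url, referer):
    # Rotate the User-Agent and pass the page that links to the resource as
    # Referer; the image host tends to refuse requests without a sensible Referer.
    headers = {"User-Agent": random.choice(USER_AGENTS), "Referer": referer}
    return requests.get(url, headers=headers, timeout=3)

In this crawler the gallery page URL is passed as the Referer whenever an image is downloaded.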
The program works out of the box; feel free to get in touch to discuss it. WeChat: Nice-1008. The first code block below is the main crawler module; the second is the helper module utils.py that it imports.
from bs4 import BeautifulSoup
import time
import multiprocessing
import utils  # helper module (utils.py), shown in the second code block below
allpic = []  # (img_url, folder, referer) entries collected for the download phase
class mzitu():
    ## Get every gallery (url, title) from the archive page
def all_url(self, url):
html = utils.requestpic(url,url)
        # Start from 2018; the loops below keep only October 2018 (hard-coded filter)
yy = 2018
all_year = BeautifulSoup(html.text, 'lxml').find('div', class_='all').find_all('ul', class_='archives')
        # Loop over the yearly archive blocks
for year in all_year:
if(yy==2018):
all_mouth = year.find_all('li')
for mouth in all_mouth:
if(mouth.find('em').text=="10月"):
print(mouth.find('em').text )
all_a = mouth.find('p', class_='url').find_all('a')
                        # Reverse so the galleries are processed oldest-first
all_a.reverse()
j = 1
for a in all_a:
title = str(yy)+'年'+mouth.find('em').text+'_'+str(str(j).zfill(2))+'_'+a.get_text()
print(title)
path = title.replace("?", '_').replace(":", '_')
if(utils.mkdir(path)):
href = a['href']
self.html(path,href)
j = j+1
yy = yy-1
    ## Get the page count of a gallery and visit every page
def html(self,title, href):
html = utils.requestpic(href,href)
        # The second-to-last span in the pagination bar holds the max page number
max_span = BeautifulSoup(html.text, 'lxml').find('div', class_='pagenavi').find_all('span')[-2].get_text()
for page in range(1, int(max_span) + 1):
page_url = href + '/' + str(page)
self.img(title,page_url)
    ## Parse a single gallery page and record its image URL
def img(self,title, page_url):
#print(page_url)
img_html = utils.requestpic(page_url,page_url)
        temp_url = BeautifulSoup(img_html.text, 'lxml').find('div', class_='main-image')
        if temp_url is not None:  # guard against pages without a main-image block
            img_url = temp_url.find('img')['src']
            allpic.append([img_url, title, page_url])  # queue for the download phase
#self.save(img_url,page_url)
if __name__ == "__main__":
    start = time.time()
    utils.get_ips()  # fill the proxy IP pool (requestpic currently uses a fixed local proxy instead)
    Mzitu = mzitu()  ## instantiate the crawler
    Mzitu.all_url('http://www.mzitu.com/all')
    start_use = time.time()-start
    start2 = time.time()
    # Save the collected images with a pool of worker processes
    pool = multiprocessing.Pool(8)
    for pic in allpic:
        pool.apply_async(utils.save, args=(pic[0], pic[1], pic[2], ))  ## hand each image to utils.save in a worker
    pool.close()
    pool.join()
    print('[info] Collecting URLs took: %s' % (start_use))
    print('[info] Downloading took: %s' % (time.time()-start2))
The helper module utils.py:
from bs4 import BeautifulSoup
import requests
import os
import random
base_path = r"C:\Develop\meizitu\mzitu"  # root folder where the galleries are saved
ips_list = []  # proxy IP pool filled by get_ips()
## Save one image from a gallery page into its folder
def save(img_url, folder, page_url):
    print('Saving image: '+img_url+" "+folder+" "+page_url)
    name = img_url[img_url.rindex('/')+1:img_url.rindex('.')]
    try:
        img = requestpic(img_url, page_url)
        if img is None:  # all retries failed, skip this image
            return False
        f = open(base_path+"/"+folder+"/"+name + '.jpg', 'ab')
        f.write(img.content)
        f.close()
    except FileNotFoundError:  ## skip missing files and keep going
        print('Image not found, skipped:', img_url)
        return False
## Variant that saves an image directly under base_path, named by the full title (not called by the main flow)
def savepic(img_url, title, page_url):
    print('Saving image: '+img_url+" "+title+" "+page_url)
    name = img_url[img_url.rindex('/')+1:img_url.rindex('.')]
    try:
        img = requestpic(img_url, page_url)
        if img is None:  # all retries failed, skip this image
            return False
        f = open(base_path+"/"+title + '.jpg', 'ab')
        f.write(img.content)
        f.close()
    except FileNotFoundError:  ## skip missing files and keep going
        print('Image not found, skipped:', img_url)
        return False
## Create the folder for a gallery; return False if it already exists (so the gallery is skipped)
def mkdir(path):
    path = path.strip()
    isExists = os.path.exists(os.path.join(base_path, path))
    if not isExists:
        print('Creating folder', path)
        os.makedirs(os.path.join(base_path, path))
        os.chdir(os.path.join(base_path, path))  ## switch into the new directory
        return True
    else:
        print('Folder already exists, skipping', path)
        return False
## Plain HTTP request through the fixed local proxy (used here to fetch the proxy list)
def request(url):
headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"}
content = requests.get(url, headers=headers, proxies={"http": "127.0.0.1:1080"})
return content
## Request with a random User-Agent and an explicit Referer, retrying on failure
def requestpic(url, Referer):  ## fetch the response for a page or image and return it
num = 1
while num<6:
try:
user_agent_list = [ \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1", \
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6", \
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1", \
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5", \
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3", \
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24", \
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
            # Pick a random User-Agent
            ua = random.choice(user_agent_list)
            headers = {'User-Agent': ua, "Referer": Referer}  ## the Referer header is the key to downloading the images
            # Random proxy from the pool (disabled; a fixed local proxy is used instead)
            #ip = random.choice(ips_list)
            ip = "127.0.0.1:1080"
content = requests.get(url, headers=headers, proxies={"http": ip},timeout=3)
        except requests.exceptions.ReadTimeout:
            print('Timeout on attempt '+str(num)+', retrying')
            num += 1
        except Exception:
            print('Error on attempt '+str(num)+', retrying')
            num += 1
        else:
            return content
    else:
        print('Failed after repeated retries')
## Initialize the proxy IP pool from xicidaili (take the first five entries)
def get_ips():
html = request('http://www.xicidaili.com/wn')
all_lst = BeautifulSoup(html.text, 'lxml').find('table').find_all(class_='odd')
i=1
for lst in all_lst:
if(i<=5):
all_td = lst.find_all('td')
ip = str(all_td[1].text)+':'+str(all_td[2].text)
ips_list.append(ip)
i=i+1
if __name__ == "__main__":
    # Initialize the proxy pool
    #get_ips()
    # Request through a random proxy
    #html2 = requestpic('http://www.baidu.com',"http://www.baidu.com")
    #print(html2)
    pass  # nothing to run when utils.py is executed directly