We're only talking about the technique here, not the content.
1: Using a thread pool in Python
Here is a simple example to get a feel for it.
# coding: utf-8
from concurrent.futures import ThreadPoolExecutor
import time

def spider(page):
    time.sleep(page)
    print(f"crawl task{page} finished")
    return page

with ThreadPoolExecutor(max_workers=5) as t:  # create a thread pool holding at most 5 workers
    task1 = t.submit(spider, 1)
    task2 = t.submit(spider, 2)  # submit the function to the thread pool for execution
    task3 = t.submit(spider, 3)

    # use done() to check whether a task has finished
    print(f"task1: {task1.done()}")  # result: False
    print(f"task2: {task2.done()}")  # result: False
    print(f"task3: {task3.done()}")  # result: False

    time.sleep(2.5)
    print(f"task1: {task1.done()}")  # result: True after 2.5s
    print(f"task2: {task2.done()}")  # result: True after 2.5s
    print(f"task3: {task3.done()}")  # result: False after 2.5s
    print(task1.result())  # use result() to get the return value
It's actually pretty simple to use.
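Beyond polling done() by hand, the standard library also offers concurrent.futures.as_completed, which yields each future as soon as it finishes. Here is a minimal sketch (not from the original post) that reuses the spider function above:

# coding: utf-8
from concurrent.futures import ThreadPoolExecutor, as_completed
import time

def spider(page):  # same function as above
    time.sleep(page)
    print(f"crawl task{page} finished")
    return page

with ThreadPoolExecutor(max_workers=5) as t:
    futures = [t.submit(spider, page) for page in (1, 2, 3)]
    for future in as_completed(futures):  # yields futures in completion order, not submission order
        print(future.result())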
2: Using it in a crawler for mm131
Here is the complete code.
# coding: utf-8
from concurrent.futures import ThreadPoolExecutor
import time
import os
import requests
from bs4 import BeautifulSoup

rootrurl = 'https://www.mm131.net/'
save_dir = 'D:/estimages/'
no_more_pages = 'END'
max_pages = 10

headers = {
    "Referer": rootrurl,
    'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
    'Accept-Language': 'en-US,en;q=0.8',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive'
}  # request headers that make the crawler look like a regular browser
def getAllTags(rootrurl):
    # parse the nav bar on the home page and map each tag URL to a local save directory
    taglist = {}
    html = BeautifulSoup(requests.get(rootrurl, headers=headers).text.encode('iso-8859-1').decode('gbk'),
                         features="html.parser")
    a_s = html.find('div', {'class': 'nav'}).find_all('a')[1:]
    for a in a_s:
        taglist[a.get('href')] = save_dir + a.string
    return taglist
def saveOneImg(dir, img_url):
    print(img_url)
    img = requests.get(img_url, headers=headers)  # fetch the actual image URL
    with open('{}/{}'.format(dir, img.url.split("/")[-1]), 'wb') as jpg:  # write the image to a local file
        jpg.write(img.content)
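# Optional variant (a sketch, not part of the original post): download the image with
# streaming so large files are written to disk in chunks instead of being held fully
# in memory. The helper name saveOneImgStreamed is made up and is not called elsewhere.
def saveOneImgStreamed(dir, img_url):
    with requests.get(img_url, headers=headers, stream=True, timeout=10) as img:
        with open('{}/{}'.format(dir, img.url.split("/")[-1]), 'wb') as jpg:
            for chunk in img.iter_content(chunk_size=8192):
                jpg.write(chunk)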
def saveImgGroup(dir, href):
    html = BeautifulSoup(requests.get(href, headers=headers).text.encode('iso-8859-1').decode('gbk'),
                         features="html.parser")
    # get total pages of this image group
    totalPages = int(html.find('div', {'class': 'content-page'}).find('span', {'class': 'page-ch'}).string[1:-1])
    # build the URL of each img directly from the naming pattern
    for i in range(1, totalPages + 1):
        url = 'https://img1.nthjjz.com/pic/%s/%d.jpg' % (href.split("/")[-1][:-5], i)
        saveOneImg(dir, url)
def saveOnePageFunc(dir, imgsPage):
    for imgPage in imgsPage:
        # create a directory per tag and per image group
        new_dir = '{}/{}'.format(dir, imgPage.find('img').get('alt'))
        if not os.path.exists(new_dir):
            os.makedirs(new_dir)
        saveImgGroup(new_dir, imgPage.get('href'))
def tagSpider(t, tag, dir):
    url = tag + 'index.html'
    while 1:
        # parse the current list page
        html = BeautifulSoup(requests.get(url, headers=headers).text.encode('iso-8859-1').decode('gbk'),
                             features="html.parser")
        # save every image group linked from this page
        saveOnePageFunc(dir, html.find('dl', {'class': 'list-left public-box'}).find_all('a', {'target': '_blank'}))
        # find the URL of the next page
        next_page = html.find('dd', {'class': 'page'}).find_all('a')[-2]
        if next_page.get_text() != '下一页':
            break
        url = tag + next_page.get('href')
if __name__ == '__main__':
    # fetch all tags first
    taglist = getAllTags(rootrurl)
    print(taglist)

    # give each tag its own thread
    with ThreadPoolExecutor(max_workers=10) as t:  # create a thread pool holding at most 10 workers
        for tag, dir in taglist.items():
            t.submit(tagSpider, t, tag, dir)
    # the with block waits for every submitted task to finish before the program exits
Try it out yourself and see the results.
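One caveat: an exception raised inside a worker thread is stored on the returned future and is silently dropped if result() is never called. Below is a minimal sketch (not from the original post, reusing getAllTags and tagSpider above) of a main block that keeps the futures and surfaces any errors:

if __name__ == '__main__':
    taglist = getAllTags(rootrurl)
    with ThreadPoolExecutor(max_workers=10) as t:
        futures = [t.submit(tagSpider, t, tag, dir) for tag, dir in taglist.items()]
    # the with block has already waited for every task; now check each one for errors
    for future in futures:
        future.result()  # re-raises any exception that occurred inside tagSpider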