Producer-Consumer (Multithreaded) Version
This version is blazingly fast. Grab a few courses to learn from!
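Before the full script, here is a minimal, runnable sketch of the producer-consumer pattern it is built on; the numbers and the "item * 2" step are placeholders for the real page-parsing and video-downloading work, not part of the crawler itself:

import threading
from queue import Queue, Empty

page_queue = Queue()     # work for producers (here: plain numbers)
video_queue = Queue()    # results for consumers

for i in range(10):
    page_queue.put(i)

def producer():
    # Drain the page queue, pushing one result per item onto the video queue.
    while not page_queue.empty():
        item = page_queue.get()
        video_queue.put(item * 2)          # stands in for "parse page, extract video URL"

def consumer():
    # Keep pulling results until the producers are done and nothing is left.
    while True:
        try:
            result = video_queue.get(timeout=1)
        except Empty:
            if page_queue.empty():
                break
            continue
        print('handled', result)           # stands in for "download the video"

threads = [threading.Thread(target=producer) for _ in range(2)]
threads += [threading.Thread(target=consumer) for _ in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()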
import requests
import re
import json
import os
from urllib import request
import threading
import time
import socket
from queue import Queue
class Producer(threading.Thread):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'close'
    }

    def __init__(self, page_queue, video_queue, *args, **kwargs):
        super(Producer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.video_queue = video_queue

    def run(self):
        # Keep parsing detail pages until the page queue is drained.
        while True:
            if self.page_queue.empty():
                break
            page_url = self.page_queue.get()
            self.parse_detail_page(page_url)
    def get_newurl(self, url):
        # Turn a static page URL into the corresponding data .js URL.
        if 'data' not in url:
            jsurl_list1 = url.rsplit('/')[3]
            jsurl_list2 = url.rsplit('/')[4].replace('html', 'js')
            new_url = 'https://www.xuexi.cn/' + jsurl_list1 + '/data' + jsurl_list2
        else:
            new_url = url
        return new_url
    def get_pydetail(self, url):
        # Fetch the data URL and parse its JSON payload, retrying up to 5 times.
        new_url = self.get_newurl(url)
        pydetail = None
        for attempt in range(5):
            try:
                resp = requests.get(new_url, headers=self.headers)
                if 'globalCache' in resp.text:
                    jsdetail = resp.text.replace('globalCache = ', '').replace(';', '')
                    pydetail = json.loads(jsdetail)
                else:
                    pydetail = json.loads(resp.text)
                resp.close()
                break
            except Exception:
                continue
        if pydetail is None:
            print('pydetail获取失败')
        return pydetail
    def get_list_url(self, url):
        # Collect the data URL of every course list referenced by the index page.
        pydetail = self.get_pydetail(url)
        DataSets = pydetail['DataSet']
        all_list_data_url = []
        for DataSet in DataSets:
            dataset = DataSet.rsplit('!')[1]
            dataset_url = 'https://www.xuexi.cn/lgdata/' + dataset + '?_st=26434284'
            list_list = self.get_pydetail(dataset_url)
            for item in list_list:
                list_url = item['url']
                list_data_url = self.get_newurl(list_url)
                all_list_data_url.append(list_data_url)
        return all_list_data_url
    def parse_page(self, url):
        pydetail = self.get_pydetail(url)
        get_static_page_url = pydetail['fpe1ki18v228w00']
        all_detail_url = []
        for detail in get_static_page_url:
            static_page_url = detail['static_page_url']
            detail_url = self.get_newurl(static_page_url)
            all_detail_url.append(detail_url)
        return all_detail_url
    def parse_detail_page(self, url):
        # Pull the course / chapter / lesson names plus the video URLs, then queue them.
        pydetail = self.get_pydetail(url)
        get_info = pydetail['fp6ioapwuyb80001']['info']
        mooc = get_info['mooc'].strip()
        mooc = re.sub(r'[\\/:?*"<>|\t]', '', mooc)          # strip characters Windows forbids in paths
        mooc_class = get_info['mooc_class'].strip()
        mooc_class = re.sub(r'[\\/:?*"<>|\t]', '', mooc_class)
        frst_name = get_info['frst_name'].strip()
        frst_name = re.sub(r'[\\/:?*"<>|\t]', '', frst_name)[0:101].strip()
        ossUrls = eval(get_info['ossUrl'])                  # the URL list arrives as a string
        for i, ossUrl in enumerate(ossUrls):
            file_name = '第' + str(i + 1) + '节' + '.mp4'
            self.video_queue.put((ossUrl, mooc, mooc_class, frst_name, file_name))
class Consumer(threading.Thread):
    def __init__(self, page_queue, video_queue, *args, **kwargs):
        super(Consumer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.video_queue = video_queue

    def run(self):
        while True:
            if self.page_queue.empty() and self.video_queue.empty():
                break
            # Note: this get() blocks with no timeout, so a consumer can hang
            # when the video queue is momentarily empty (see "Known issues" below).
            video_url, mooc, mooc_class, frst_name, file_name = self.video_queue.get()
            try:
                # muke_path = os.path.join(os.path.dirname(__file__), '慕课视频')
                muke_path = r'D:\慕课视频'
                if not os.path.exists(muke_path):
                    os.makedirs(muke_path)
                mooc_class_path = os.path.join(muke_path, mooc, mooc_class, frst_name)
                if not os.path.exists(mooc_class_path):
                    os.makedirs(mooc_class_path)
            except Exception:
                continue  # skip this video if the target directory cannot be created
            file_name_path = os.path.join(mooc_class_path, file_name)
            if not os.path.exists(file_name_path):
                try:
                    request.urlretrieve(video_url, file_name_path)
                    print('下载 ' + mooc + frst_name + file_name + '完成')
                except Exception:
                    count = 1
                    while count <= 5:
                        try:
                            request.urlretrieve(video_url, file_name_path)
                            break
                        except Exception:
                            count += 1
                    if count > 5:
                        print("downloading {}{} failed!".format(frst_name, file_name))
            else:
                print(mooc + mooc_class + frst_name + file_name + ' 已经下载完毕啦!!!')
def main():
    page_queue = Queue(50000)
    video_queue = Queue(100000)
    producer = Producer(page_queue, video_queue)
    url = 'https://www.xuexi.cn/lgdata/f547c0f321ac9a0a95154a21485a29d6/1cdd8ef7bfc3919650206590533c3d2a.json?_st=26434284'
    list_urls = producer.get_list_url(url)
    for list_url in list_urls:
        page_urls = producer.parse_page(list_url)
        for page_url in page_urls:
            page_queue.put(page_url)
    for i in range(5):
        Producer(page_queue, video_queue).start()
    for i in range(5):
        Consumer(page_queue, video_queue).start()

if __name__ == '__main__':
    main()
Known issues:
Crawling throughput is decent, but the program still stalls frequently: it stops making progress without raising any error.
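A likely culprit is that Consumer.run calls video_queue.get() with no timeout, so a consumer can block forever once the video queue is momentarily empty while pages are still being parsed, and urlretrieve itself has no network timeout. Below is a rough sketch of one possible mitigation, using a timed get and a global socket timeout; it is untested against this site, and download_one() is a hypothetical stand-in for the directory-creation and urlretrieve logic above:

import socket
import threading
from queue import Queue, Empty

socket.setdefaulttimeout(30)        # abort downloads that stall for more than 30 seconds

def download_one(video_url, mooc, mooc_class, frst_name, file_name):
    # Placeholder: in the real script this would be the makedirs + urlretrieve logic.
    print('would download', video_url)

class Consumer(threading.Thread):
    def __init__(self, page_queue, video_queue, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.video_queue = video_queue

    def run(self):
        while True:
            try:
                # Wait a bounded time instead of blocking forever.
                item = self.video_queue.get(timeout=10)
            except Empty:
                if self.page_queue.empty():
                    break           # producers are done and nothing is left to download
                continue            # producers are still filling the queue; keep waiting
            video_url, mooc, mooc_class, frst_name, file_name = item
            download_one(video_url, mooc, mooc_class, frst_name, file_name)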
Tips:
1. Don't create the download directory inside the project folder, especially when downloading large files such as videos, music, or images:
muke_path = os.path.join(os.path.dirname(__file__), '慕课视频')
Pick a path of your own outside the project instead; otherwise the IDE can't keep up. I use PyCharm, and it lagged so badly it nearly drove me crazy.
muke_path = r'D:\慕课视频'
2. The Python built-in function eval, in short, converts a string shaped like a list, dict, or tuple into the actual list, dict, or tuple.
Example: in this project, the string of video URLs fetched for each lesson has to be converted into a list before it can be iterated over.
get_info = pydetail['fp6ioapwuyb80001']['info']
ossUrls = eval(get_info['ossUrl'])
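A quick illustration of what that conversion does, plus a safer alternative: since eval will execute any code embedded in the string, the standard-library ast.literal_eval does the same job for plain literals (lists, dicts, tuples, strings, numbers) without that risk. The URL strings below are made up purely for the example:

import ast

oss_url_str = '["https://example.com/lesson1.mp4", "https://example.com/lesson2.mp4"]'  # made-up data
ossUrls = ast.literal_eval(oss_url_str)      # parses the list literal without executing code
for i, ossUrl in enumerate(ossUrls):
    print('第' + str(i + 1) + '节', ossUrl)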