scrapy版本
前前后后几个星期,错了改,改了错,错了再改!错了不要紧,错了才能诞生更好的版本。
mooc_spider.py
# -*- coding: utf-8 -*-
import ast
import json
import re

import scrapy

from mooc.items import MoocItem
class MoocSpiderSpider(scrapy.Spider):
    """Crawl xuexi.cn's MOOC catalogue.

    Flow: subject index JSON -> per-subject dataset JSON -> per-course
    data*.js blob -> per-lesson data*.js blob -> one MoocItem per video URL.
    """

    name = 'mooc_spider'
    allowed_domains = ['www.xuexi.cn']
    start_urls = ['https://www.xuexi.cn/lgdata/f547c0f321ac9a0a95154a21485a29d6/1cdd8ef7bfc3919650206590533c3d2a.json?_st=26434284']

    # Characters that are illegal in Windows file/folder names; compiled once
    # instead of re-building the pattern for every field of every page.
    _ILLEGAL_CHARS = re.compile(r'[?*"<>\:|\t\/\\]')

    @staticmethod
    def _data_js_url(page_url):
        """Map a static HTML page URL to its companion data*.js URL."""
        parts = page_url.rsplit('/')
        return 'https://www.xuexi.cn/' + parts[3] + '/data' + parts[4].replace('html', 'js')

    @staticmethod
    def _strip_js(text):
        """Reduce a 'globalCache = {...};' JS assignment to a bare JSON payload."""
        return text.replace('globalCache =', '').replace(';', '')

    def _clean(self, value):
        """Strip whitespace/vertical tabs and path-illegal characters from a name."""
        return self._ILLEGAL_CHARS.sub('', value.strip().replace('\x0b', ''))

    def parse(self, response):
        """Parse the top index: follow every dataset listed under 'DataSet'."""
        index = json.loads(response.text)
        for entry in index['DataSet']:
            # Each entry looks like '<key>!<path>.json'; the part after '!' is the path.
            dataset = entry.rsplit('!')[1]
            dataset_url = 'https://www.xuexi.cn/lgdata/' + dataset + '?_st=26434284'
            yield scrapy.Request(response.urljoin(dataset_url), callback=self.get_list_url)

    def get_list_url(self, response):
        """Parse a dataset (JSON array of courses) and follow each course's data blob."""
        for course in json.loads(response.text):
            yield scrapy.Request(response.urljoin(self._data_js_url(course['url'])),
                                 callback=self.get_page_url)

    def get_page_url(self, response):
        """Parse a course blob and follow every lesson's static page data blob."""
        pydetail = json.loads(self._strip_js(response.text))
        for detail in pydetail['fpe1ki18v228w00']:
            yield scrapy.Request(response.urljoin(self._data_js_url(detail['static_page_url'])),
                                 callback=self.parse_page)

    def parse_page(self, response):
        """Extract course metadata and yield one MoocItem per video URL."""
        pydetail = json.loads(self._strip_js(response.text))
        get_info = pydetail['fp6ioapwuyb80001']['info']
        mooc = self._clean(get_info['mooc'])
        mooc_class = self._clean(get_info['mooc_class'])
        # Cap the section name so the resulting folder path stays reasonably short.
        frst_name = self._clean(get_info['frst_name'])[0:101].strip()
        # ossUrl arrives as a text-encoded list literal. literal_eval parses it
        # without eval()'s arbitrary-code-execution risk on scraped input.
        oss_urls = ast.literal_eval(get_info['ossUrl'])
        for i, video_url in enumerate(oss_urls):
            yield MoocItem(
                mooc=mooc,
                mooc_class=mooc_class,
                frst_name=frst_name,
                file_name='第' + str(i + 1) + '节' + '.mp4',
                video_url=video_url,
            )
items.py
import scrapy
class MoocItem(scrapy.Item):
    """One downloadable MOOC video segment scraped from xuexi.cn."""
    mooc = scrapy.Field()        # course title (sanitized by the spider for folder use)
    mooc_class = scrapy.Field()  # course sub-category / class name
    frst_name = scrapy.Field()   # section name (truncated to ~100 chars by the spider)
    file_name = scrapy.Field()   # local file name, e.g. '第1节.mp4'
    video_url = scrapy.Field()   # direct URL of the video file
middlewares.py
设置随机请求头
import random
class UserAgentDownloadMiddleware(object):
    """Downloader middleware that attaches a randomly chosen User-Agent to each request.

    BUG FIX: the original list had no commas between the string literals, so
    Python's implicit string concatenation collapsed all nine agents into one
    giant element — random.choice always returned that single merged string.
    The commas restore nine distinct agents.
    """

    USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; rv:11.0) like Gecko',
        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',
        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0',
        'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; rv:11.0) like Gecko',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299',
        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.5680.400 QQBrowser/10.2.1852.400',
        'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; Core/1.63.5680.400 QQBrowser/10.2.1852.400; rv:11.0) like Gecko',
    ]

    def process_request(self, request, spider):
        """Overwrite the outgoing request's User-Agent with a random pick."""
        request.headers['User-Agent'] = random.choice(self.USER_AGENTS)
pipelines.py
生成动态文件夹并下载
import os
from urllib import request
class MoocPipeline(object):
    """Download each item's video into <root>/<mooc>/<mooc_class>/<frst_name>/.

    Skips files that already exist; retries failed downloads up to 5 extra times.
    """

    def open_spider(self, spider):
        # Loud banner so progress is visible even at LOG_LEVEL = 'WARNING'.
        print("爬虫开始了......" * 5)

    def process_item(self, item, spider):
        mooc = item['mooc']
        mooc_class = item['mooc_class']
        frst_name = item['frst_name']
        file_name = item['file_name']
        video_url = item['video_url']
        # Raw string: the original 'D:\慕课视频' only worked because '\慕' is not
        # a recognized escape (and is a DeprecationWarning on modern Python).
        muke_path = r'D:\慕课视频'
        mooc_class_path = os.path.join(muke_path, mooc, mooc_class, frst_name)
        # makedirs creates all intermediate folders; exist_ok avoids the
        # check-then-create race of the original exists()/makedirs() pair.
        os.makedirs(mooc_class_path, exist_ok=True)
        file_name_path = os.path.join(mooc_class_path, file_name)
        if os.path.exists(file_name_path):
            print(mooc + mooc_class + frst_name + file_name + ' 已经下载完毕啦!!!')
            return item
        # One initial attempt plus up to 5 retries (same budget as the original
        # nested try/while). Narrowed from a bare except so KeyboardInterrupt
        # and SystemExit are no longer swallowed.
        for _attempt in range(6):
            try:
                request.urlretrieve(video_url, file_name_path)
                print('下载 ' + mooc + mooc_class + frst_name + file_name + '完成')
                break
            except Exception:
                continue
        else:
            print(mooc + mooc_class + frst_name + file_name + '下载失败')
        return item

    def close_spider(self, spider):
        print("爬虫结束了!!!!" * 5)
settings.py
# Scrapy project settings for the xuexi.cn MOOC downloader.
BOT_NAME = 'mooc'

SPIDER_MODULES = ['mooc.spiders']
NEWSPIDER_MODULE = 'mooc.spiders'

USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'

# Keep the console quiet; the pipeline prints its own progress messages.
LOG_LEVEL = 'WARNING'

# The target serves JSON data files; robots.txt would otherwise block them.
ROBOTSTXT_OBEY = False

DEFAULT_REQUEST_HEADERS = {
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate, br',
    # RFC 7231 forbids spaces around '=' in a quality value ('q = 0.9' is malformed).
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'close',
}

DOWNLOADER_MIDDLEWARES = {
    'mooc.middlewares.UserAgentDownloadMiddleware': 543,
}

# The original listing was missing this dict's closing brace (a SyntaxError).
ITEM_PIPELINES = {
    'mooc.pipelines.MoocPipeline': 300,
}
start.py
运行整个项目
# Convenience launcher: runs the spider without typing the shell command.
from scrapy import cmdline

# Equivalent to `scrapy crawl mooc_spider` on the command line; the argv is
# spelled out as a list instead of split from a string.
cmdline.execute(['scrapy', 'crawl', 'mooc_spider'])
爬取成果:
爬取成果.png