Python crawler: downloading an image
```python
import requests

response = requests.get('https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1564640268773&di=90d3bdcd271057766ce22862c2c3b77e&imgtype=0&src=http%3A%2F%2Fmelbournemakerspace.org%2Fblog%2Fwp-content%2Fuploads%2F2015%2F04%2FPython.png')
# get the response body as bytes
data = response.content
with open('python.png', 'wb') as f:
    f.write(data)
```
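The snippet above writes the bytes to disk unconditionally. A slightly more defensive sketch (the `save_image` helper and the `timeout` value are my own additions, not part of the original) only saves the file when the request succeeds:

```python
import requests

def save_image(url, path, timeout=10):
    """Download one image and save it only if the request succeeded."""
    response = requests.get(url, timeout=timeout)
    if response.status_code == 200:
        # response.content holds the raw bytes of the image
        with open(path, 'wb') as f:
            f.write(response.content)
        return True
    return False

# usage, reusing the same Python logo URL as above
save_image(
    'https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1564640268773&di=90d3bdcd271057766ce22862c2c3b77e&imgtype=0&src=http%3A%2F%2Fmelbournemakerspace.org%2Fblog%2Fwp-content%2Fuploads%2F2015%2F04%2FPython.png',
    'python.png'
)
```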
Python crawler: pagination and batch image download
- Pagination: implement it with a loop, using the loop's step size as the page offset (see the sketch after this list)
- Store the downloaded images
- `etree` is flagged as unresolved (shown in red) by some IDEs: with newer lxml versions `from lxml import etree` gets the warning, but the module is still there, it is just exposed in a different way
- Even with the red warning, `from lxml import etree` still runs
- Using `from lxml import html` followed by `etree = html.etree` gets rid of the warning
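Both points can be seen in isolation in the sketch below; it is a standalone snippet that only prints the page URLs instead of requesting them:

```python
# etree import workaround: lxml still provides etree at runtime,
# but importing it via lxml.html keeps some IDEs from flagging it
from lxml import html
etree = html.etree

# pagination: Douban Top 250 shows 25 movies per page,
# so start = 0, 25, 50, ..., 225 covers all ten pages
for start in range(0, 250, 25):
    print('https://movie.douban.com/top250?start={}&filter='.format(start))
```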
```python
# imports
import os
import requests
from lxml import html
etree = html.etree


def spider_douban_top250():
    movie_list_info = []
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"}
    # pagination: each page shows 25 movies, so loop over start = 0, 25, ..., 225
    for i in range(0, 250, 25):
        url = 'https://movie.douban.com/top250?start={}&filter='.format(i)
        # get the response body as bytes
        data = requests.get(url, headers=headers).content
        # parse the page; the variable is not called "html" so the lxml.html module is not shadowed
        selector = etree.HTML(data)
        ol_list = selector.xpath('//div[@id="content"]//div[@class="article"]/ol/li')
        for movie in ol_list:
            # movie rank number
            serial_number = movie.xpath('./div[@class="item"]/div[@class="pic"]/em/text()')
            # xpath() returns a list; fall back to '' when it is empty to avoid an IndexError
            if len(serial_number) == 0:
                serial_number = ''
            else:
                serial_number = serial_number[0]
            # print(serial_number)
            # movie title
            movie_name = movie.xpath('./div[@class="item"]/div[@class="info"]/div[@class="hd"]/a/span[1]/text()')
            if len(movie_name) == 0:
                movie_name = ''
            else:
                movie_name = movie_name[0]
            # print(movie_name)
            # movie details (director, cast, year, genre)
            movie_introduce = movie.xpath('./div[@class="item"]/div[@class="info"]/div[@class="bd"]/p[1]/text()')
            if len(movie_introduce) == 0:
                movie_introduce = ''
            else:
                movie_introduce = movie_introduce[0].strip()
            # print(movie_introduce)
            # star rating
            star = movie.xpath('./div[@class="item"]/div[@class="info"]/div[@class="bd"]/div[@class="star"]/span[2]/text()')
            if len(star) == 0:
                star = ''
            else:
                star = star[0]
            # print(star)
            # number of ratings
            evalute = movie.xpath('./div[@class="item"]/div[@class="info"]/div[@class="bd"]/div[@class="star"]/span[4]/text()')
            if len(evalute) == 0:
                evalute = ''
            else:
                # strip the '人评价' ("people rated") suffix, keeping only the number
                evalute = evalute[0].replace('人评价', '')
            # print(evalute)
            # one-line quote describing the movie
            describe = movie.xpath('./div[@class="item"]/div[@class="info"]/div[@class="bd"]/p[@class="quote"]/span[1]/text()')
            if len(describe) == 0:
                describe = ''
            else:
                describe = describe[0]
            # print(describe)
            # cover image URL
            movie_img_url = movie.xpath('./div[@class="item"]/div[@class="pic"]/a/img/@src')
            if len(movie_img_url) == 0:
                movie_img_url = ''
            else:
                movie_img_url = movie_img_url[0]
            # print(movie_img_url)
            # collect all fields for this movie
            movie_list_info.append({
                'serial_number': serial_number,
                'movie_name': movie_name,
                'movie_introduce': movie_introduce,
                'star': star,
                'evalute': evalute,
                'describe': describe,
                'movie_img_url': movie_img_url
            })
    # for movie in movie_list_info:
    #     print(movie)
    # download the cover images
    os.makedirs('./img', exist_ok=True)  # make sure the target directory exists
    for movie in movie_list_info:
        url = movie['movie_img_url']
        resp = requests.get(url)
        # only write the file when the request succeeded (status code 200)
        if resp.status_code == 200:
            img_name = '0000000{}.jpg'.format(movie['serial_number'])
            with open('./img/{}'.format(img_name), 'wb') as f:
                f.write(resp.content)


spider_douban_top250()
```
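One design note on the parsing loop: every field repeats the same "take the first xpath result, otherwise fall back to an empty string" guard. A small helper along these lines (hypothetical; `first_or_empty` does not appear in the original code) would shorten the loop body without changing its behavior:

```python
def first_or_empty(nodes, default=''):
    """Return the first xpath result, or a default when the result list is empty."""
    return nodes[0] if nodes else default

# e.g. inside the movie loop:
# serial_number = first_or_empty(movie.xpath('./div[@class="item"]/div[@class="pic"]/em/text()'))
# movie_name = first_or_empty(movie.xpath('./div[@class="item"]/div[@class="info"]/div[@class="hd"]/a/span[1]/text()'))
```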