Posting the code here for everyone to share. The crawler itself is actually simple; the hard part is getting past anti-crawler measures. I read up on anti-crawling today and will write it up as a separate article in the next day or two.
The code is as follows:
import requests
from bs4 import BeautifulSoup
import os,time,random
from selenium import webdriver #only needed for the commented-out selenium fallback below
user_agent_list = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 7_1_2 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) Version/7.0 Mobile/11D257 Safari/9537.53"
]
baseurl='http://www.66bb.org'
href_list=[]
name_list=[]
UA=random.choice(user_agent_list)#pick a random User-Agent
headers={'User-Agent':UA}
#proxies = {"http": "http://119.5.0.58:808"}
proxies = {"http": "http://219.217.80.182:1080"}
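#Note: as written, proxies is defined but never passed to requests.get, so the
#proxy is silently unused. To actually route traffic through it (a sketch,
#assuming the proxy above is reachable), the call would look like:
#   html = requests.get(baseurl, headers=headers, proxies=proxies)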
html=requests.get(baseurl,headers=headers)
html.encoding='gb2312'
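#Note: gb2312 is hard-coded here; if the site actually serves gbk/gb18030
#characters, an alternative (not in the original) is to let requests guess:
#   html.encoding = html.apparent_encoding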
Soup=BeautifulSoup(html.text,'lxml')
href_all=Soup.find('ul',class_='topline_3b').findAll('li')
for i in range(0,len(href_all)):
    lanmu_href=href_all[i].find('a')['href']
    lanmu_name=href_all[i].find('a').get_text()
    href_list.append(lanmu_href)
    name_list.append(lanmu_name)
for lanmu_num in range(0,len(href_list)):
    #build the column URL from the site root each time; overwriting baseurl
    #itself would keep accumulating hrefs across iterations
    lanmu_url=baseurl+href_list[lanmu_num]
    start_num=0
    start_page=int(start_num/20)+1
    start_image=int(start_num%20)
    #took a look: about 6 pages in total; the upper bound of 15 leaves headroom
    for i in range(start_page,15):
        #get each page's links and names
        if i != 1:
            urllink=lanmu_url+'list4'+str(i)+'.html'
        else:
            urllink=lanmu_url
        #get the link address and folder name of item XX on page X
        UA=random.choice(user_agent_list)#pick a random User-Agent
        headers={'User-Agent':UA}
        #proxies = {"http": "http://119.5.0.58:808"}
        #proxies = {"http": "http://219.217.80.182:1080"}
        html=requests.get(urllink,headers=headers)
        html.encoding='gb2312'
        Soup=BeautifulSoup(html.text,'lxml')
        all_url=Soup.find(class_="fzltp").findAll('li')
        #all dd tags are galleries, while dt tags are column headers
        print(u'Page',i,u'contains',len(all_url),u'galleries, starting download')
        print(all_url[0])
        print(all_url[0].find('a'))
        #start extracting each gallery's link address and title
        for num in range(start_image,len(all_url)):
            #fetch the gallery page and parse the html, mainly to get the total image count
            href=all_url[num].find('a')['href']
            href1='http://www.66bb.org'+href
            #http://www.66xx.org/ArtDD/
            UA=random.choice(user_agent_list)#pick a random User-Agent
            headers={'User-Agent':UA}
            page1html=requests.get(href1,headers=headers)
            page1html.encoding='gb2312'
            page1soup=BeautifulSoup(page1html.text,'lxml')
            total_pages_1=page1soup.find('div',class_='tpm01').find('font',color='blue').get_text()
            total_pages_1=int(total_pages_1[-3:-1])
            print(u'There are',total_pages_1,u'pages of images to save')
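            #Note: the slice [-3:-1] above assumes the page count is always two
            #digits. A more robust alternative (a sketch, not in the original)
            #is to pull the first number out of the text with a regex:
            #   import re
            #   total_pages_1 = int(re.search(r'\d+', total_pages_1).group())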
            #get the folder name
            dirname=all_url[num].find('img')['alt']
            #create the folder and switch into the subfolder
            os.chdir("D:\\")
            path = name_list[lanmu_num]+'\\'+dirname+'('+str(total_pages_1)+'P)'
            isExists = os.path.exists(os.path.join("D:\\66人体艺术图片\\",path))
            if not isExists:
                print(u'Created a folder named',path,u'!')
                os.makedirs(os.path.join("D:\\66人体艺术图片\\", path))
            else:
                print(u'A folder named',path,u'already exists!')
                #skip this gallery
                continue
            os.chdir("D:\\66人体艺术图片\\"+path)
            #basename=imgsrc1[-14:-5]
            #find every image address, open the image page, and save the image
            #the last page's html often fails to open; it's not a one-off and seems to be common
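            #Note (a suggestion, not in the original code): since the last page
            #often fails, the page fetch below could get the same try/except
            #treatment as the image request further down, e.g.:
            #   try:
            #       html_2=requests.get(baseurl_2,headers=headers)
            #   except requests.exceptions.RequestException:
            #       continue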
            #use page_num here so the outer page loop's i isn't shadowed
            for page_num in range(1,total_pages_1+1):
                baseurl_2=href1+str(page_num)+'.html'
                html_2=requests.get(baseurl_2,headers=headers)
                html_2.encoding='gb2312'
                soup_2=BeautifulSoup(html_2.text,'lxml')
                #extract the main image's address and name
                image_href=soup_2.find('div',class_='imgbox').find('img')['src']
                image_href='http://www.66bb.org'+image_href
                #image_name=soup_2.find('div',class_='imgbox').find('img')['alt']
                #image_name=image_name[-3:]+'.jpg'
                image_name=str(page_num)+'.jpg'
                #if plain requests keep getting interrupted here, selenium can drive Chrome or PhantomJS instead
                #browser=webdriver.Chrome()
                #browser = webdriver.PhantomJS(executable_path="phantomjs.exe")
                #browser.get(image_href)
                time.sleep(1)
                #imghtml=browser.page_source #page_source holds the page's html
                UA=random.choice(user_agent_list)#pick a random User-Agent
                headers={'User-Agent':UA}
                try:
                    imgrealhtml=requests.get(image_href,headers=headers)
                except requests.exceptions.RequestException:
                    time.sleep(5)
                    print(u'Please wait, reconnecting in 5 seconds')
                    UA=random.choice(user_agent_list)#pick a random User-Agent
                    headers={'User-Agent':UA}
                    imgrealhtml=requests.get(image_href,headers=headers)
                f = open(image_name,'wb')#'wb' so a rerun overwrites instead of appending
                f.write(imgrealhtml.content)
                f.close()
                print(u'Image',page_num,u'saved')
                time.sleep(1)
                #browser.quit()
            print(u'All',total_pages_1,u'images in this gallery saved')
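
To echo the point at the top, that the crawling itself is easy and the anti-crawler side is the hard part: below is a minimal, self-contained sketch of a fetch helper that bundles the tricks used piecemeal above (random User-Agent, sleep-and-retry, optional proxies) into one function. The name fetch_with_retry and its parameters are my own invention for illustration, not part of the crawler above.

import random
import time
import requests

def fetch_with_retry(url, user_agents, proxies=None, retries=3, backoff=5):
    #fetch url, rotating the User-Agent and backing off between failed attempts
    for attempt in range(retries):
        headers = {'User-Agent': random.choice(user_agents)}
        try:
            resp = requests.get(url, headers=headers, proxies=proxies, timeout=10)
            resp.raise_for_status()
            return resp
        except requests.exceptions.RequestException:
            time.sleep(backoff * (attempt + 1))#wait longer after each failure
    return None

#usage:
#resp = fetch_with_retry(image_href, user_agent_list)
#if resp is not None:
#    open(image_name, 'wb').write(resp.content)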