#!/usr/bin/env python
#encoding:utf-8
# Python 2 scraper: searches dy2018.com ("Movie Heaven") for movie download links.
import requests
from bs4 import BeautifulSoup
import urllib
import sys
import re
# Work around Python 2's default ascii codec so Chinese text doesn't raise
# UnicodeDecodeError (original comment: "solve the encoding error problem").
# NOTE(review): reload(sys)/setdefaultencoding is a Python-2-only hack.
reload(sys)
sys.setdefaultencoding('utf8')
def getHex(words):
    """Decode a percent-encoded (URL-encoded) string into its raw characters.

    Each "%XX" escape becomes chr(0xXX); characters that urllib.quote left
    unescaped (ASCII letters, digits, etc.) are passed through unchanged.

    Bug fixed: the original assumed every piece after split("%") was exactly
    two hex digits, so mixed input like "a%42c" fed int("42c", 16) a wrong
    value, and a fully-unescaped keyword (e.g. an ASCII search term) decoded
    to the empty string because the leading literal segment was discarded.
    """
    pieces = words.split("%")
    # Text before the first '%' is literal, not an escape.
    result = pieces[0]
    for piece in pieces[1:]:
        # The first two characters are the hex byte; the rest is literal.
        result += chr(int(piece[:2], 16)) + piece[2:]
    return result
'''
电影天堂模块 — "Movie Heaven" (dy2018.com) search module
'''
# config-start
maxPage = 5  # pages fetched before asking the user whether to continue
modelName = "电影天堂"  # site name ("Movie Heaven"), used in the output file name
url = "http://www.dy2018.com"  # base URL, joined with relative detail-page hrefs
keyword = sys.argv[1]  # search keyword from the command line
pageSize = 20  # results per page on the site
# URL-encode the query keyword; the site expects GBK-encoded bytes.
keywordURLencode = urllib.quote(keyword.decode(sys.stdin.encoding).encode('GBK'))
searchUrl = "http://www.dy2018.com/e/search/index.php"
# Form fields for the site's search endpoint.
postData = {
    'classid':'0',
    'show':'title,smalltext',
    'tempid':'1',
    'keyboard': getHex(keywordURLencode),
    # NOTE(review): these bytes appear to be the GBK encoding of the search
    # button's label, sent raw so requests doesn't re-encode it — confirm.
    'Submit':chr(0xC1) + chr(0xA2) + chr(0xBC) + chr(0xB4) + chr(0xCB) + chr(0xD1) + chr(0xCB) + chr(0xF7)
}
# Browser-mimicking headers shared by every request in this script.
headers = {
    'Host' : 'www.dy2018.com',
    'Cache-Control' : 'max-age=0',
    'Origin' : 'http://www.dy2018.com',
    'Upgrade-Insecure-Requests' : '1',
    'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
    'Content-Type' : 'application/x-www-form-urlencoded',
    'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Referer' : 'http://www.dy2018.com/index.html',
    'Accept-Encoding' : 'gzip, deflate',
    'Accept-Language' : 'zh-CN,zh;q=0.8,en;q=0.6',
    'Connection' : 'close'
}
# config-end
def getContent(url):
    """GET *url* with the shared browser-mimicking headers and return the
    page body as text, decoded as gb2312 (the site's charset)."""
    global headers
    page = requests.get(url, headers=headers)
    # The site serves GBK/gb2312 pages; force the decode charset.
    page.encoding = 'gb2312'
    return page.text
def getResultNumber(soup):
    """Extract the total result count from the search-result page.

    The count lives in an anchor of the form <a title="总数"><b>NNN</b></a>.
    """
    total_anchor = soup.find("a", title="总数")
    count_text = total_anchor.find("b").string
    return int(count_text)
def getSoupByPostData(url, postData):
    """POST the search form and return the response parsed as a soup.

    Bug fixed: the original called .decode("UTF-8") on response.text, which
    is already a unicode string in Python 2 (raising UnicodeEncodeError for
    any non-ASCII page), and the site is GBK-encoded, not UTF-8.  The
    response encoding is now forced to gb2312, consistent with getContent().
    """
    global headers
    response = requests.post(url, data=postData, headers=headers)
    response.encoding = 'gb2312'  # site charset, same as getContent()
    soup = BeautifulSoup(response.text, "html.parser")
    return soup
def getPageNumber(resultNumber, size=None):
    """Return how many result pages are needed to show *resultNumber* hits.

    *size* defaults to the module-level ``pageSize``; passing it explicitly
    is a backward-compatible generalization.

    Bug fixed: the original ``int(n / pageSize) + 1`` reported one page too
    many whenever the result count was an exact multiple of the page size,
    and one page for zero results.  This uses ceiling division instead.
    """
    if size is None:
        size = pageSize  # module-level results-per-page constant
    # Ceiling division without floats: -(-a // b) == ceil(a / b).
    return -(-resultNumber // size)
def getPageID(soup):
    """Pull the search-session id out of the pager links.

    The pager div's second <a> href has the form
    "...searchid-<ID>-page-<N>.html"; the token between the first two
    '-' separators is the id needed for subsequent page requests.
    """
    pager = soup.find('div', class_="x", style="text-align: center;font-size: 14px;margin: 5px 0;")
    second_link = pager.findAll("a")[1]
    return second_link['href'].split("-")[1]
def getResultDic(soup):
    """Scrape one search-result page into a list of result dicts.

    For each result table this collects the title, description, post time
    and click count, then fetches the result's detail page to gather its
    download links.  Returns a list of dicts with keys: title, describe,
    time, click, downloadLink (a list of hrefs).

    NOTE: performs one extra HTTP request per result (the detail page).
    """
    results = []
    tables = soup.findAll("table", width="100%", border="0", cellspacing="0", cellpadding="0", class_="tbspan", style="margin-top:6px")
    for table in tables:
        # Result title
        title = str(table.find("a")["title"])
        # Result description
        describe = table.find("td", colspan="2", style="padding-left:3px").string
        # Detail-page URL (relative href joined onto the base url)
        src = url + table.find("a")['href']
        # Post time and click count share one <font> tag, one per line
        temp = table.find("font", color="#8F8C89").string
        time = temp.split("\n")[0].split(":")[1][0:-1]  # NOTE: per the original author this is a full-width Chinese colon
        click = temp.split("\n")[1].split(":")[1]  # NOTE: per the original author this is a full-width Chinese colon
        # Download links scraped from the detail page
        downloadLinks = []
        newContent = getContent(src)
        newSoup = BeautifulSoup(newContent, "html.parser")
        tbodys = newSoup.findAll("tbody")
        for tbody in tbodys:
            downloadLinks.append(tbody.find("a")['href'])
        result = {
            "title":title,
            "describe":describe,
            'time':time,
            'click':click,
            "downloadLink":downloadLinks
        }
        results.append(result)
        print "单条数据获取成功 !"
    return results
soup = getSoupByPostData(searchUrl, postData)
resultNumber = getResultNumber(soup)
pageNumber = getPageNumber(resultNumber)
pageID = getPageID(soup)
print "查询结果数 :", resultNumber
print "总页面数量 :", pageNumber
print "正在获取第 1 页的结果"
results = getResultDic(soup)
print "该页所有结果获取成功 !"
if pageNumber > maxPage:
for page in range(maxPage):
print "正在获取第",(page + 1),"页的结果"
thisUrl = "http://www.dy2018.com/e/search/result/searchid-" + pageID + "-page-" + str(page) + ".html"
tempContent = getContent(thisUrl)
tempSoup = BeautifulSoup(tempContent, "html.parser")
results += getResultDic(soup)
SIGN = input("已经获取了" + maxPage + "个页面 , 是否需要继续获取 ? [1/0]")
if SIGN == 1:
for page in range(maxPage, pageNumber):
print "正在获取第",(page + 1),"页的结果"
thisUrl = "http://www.dy2018.com/e/search/result/searchid-" + pageID + "-page-" + str(page) + ".html"
tempContent = getContent(thisUrl)
tempSoup = BeautifulSoup(tempContent, "html.parser")
results += getResultDic(soup)
else:
for page in range(pageNumber):
print "正在获取第",(page + 1),"页的结果"
thisUrl = "http://www.dy2018.com/e/search/result/searchid-" + pageID + "-page-" + str(page) + ".html"
tempContent = getContent(thisUrl)
tempSoup = BeautifulSoup(tempContent, "html.parser")
results += getResultDic(soup)
print "数据获取完毕 ! "
# Write the collected results to "<site>-<keyword>.txt", one block per entry.
# Bug fixed: the original reopened (and leaked re-creations of) the file once
# per result inside the loop, shadowing the `file` builtin; open it once and
# guarantee it is closed.
outFile = open(modelName + "-" + keyword + ".txt", "a+")
try:
    for result in results:
        outFile.write("---------------------------\n")
        outFile.write("标题 : " + result['title'] + "\n")
        outFile.write("描述 : \n\t" + result['describe'] + "\n")
        outFile.write("时间 : " + result['time'] + "\n")
        outFile.write("点击量 : " + result['click'] + "\n")
        outFile.write("下载地址 : " + "\n")
        for downloadlink in result['downloadLink']:
            outFile.write("\t" + downloadlink + "\n")
        outFile.write("\n")
finally:
    outFile.close()
# --- End of script. The lines below are non-code residue captured when this
# --- script was copied from a blog post (article title, copyright notice and
# --- unrelated recommended-story snippets); commented out so the file parses.
# [懒人福利]用Python进行[天堂电影]下载资源的搜索
# 最后编辑于 :
# ©著作权归作者所有,转载或内容合作请联系作者
# - 文/潘晓璐 我一进店门,熙熙楼的掌柜王于贵愁眉苦脸地迎上来,“玉大人,你说我怎么就摊上这事。” “怎么了?”我有些...
# - 文/花漫 我一把揭开白布。 她就那样静静地躺着,像睡着了一般。 火红的嫁衣衬着肌肤如雪。 梳的纹丝不乱的头发上,一...
# - 文/苍兰香墨 我猛地睁开眼,长吁一口气:“原来是场噩梦啊……” “哼!你这毒妇竟也来了?” 一声冷哼从身侧响起,我...