网址: http://maoyan.com/board/4?offset=0 (offset = 0, 10, ..., 90)
爬取所有关键信息
#coding=utf-8
__author__ = 'zhujingwei'
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import requests
from bs4 import BeautifulSoup
from requests.exceptions import RequestException
import json
import codecs
import os
from multiprocessing import Pool
def getOnePage(url):
    """Fetch one Maoyan board page and return its HTML text.

    Returns the decoded page body on HTTP 200, or None on any other
    status code or on a network error.
    """
    head = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
    try:
        # Timeout keeps a hung connection from blocking a pool worker forever.
        response = requests.get(url, headers=head, timeout=10)
        # Force UTF-8 decoding; the site serves Chinese text.
        response.encoding = 'utf-8'
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        # Best-effort scrape: any request failure is treated as "no page".
        return None
def _strip_label(text, label):
    """Remove an exact leading label (e.g. '主演:') from *text*.

    str.lstrip() strips any characters from the given *set*, which can
    eat leading characters of the real value; this removes only the
    literal prefix.
    """
    if text.startswith(label):
        return text[len(label):]
    return text


def parseOnePage(html):
    """Parse one board page's HTML into a list of movie dicts.

    Each dict holds index, cover image URL, title, actors, release time
    and score. Every dict is also appended to result.txt as a JSON line
    via writeToFile() (side effect kept for compatibility with callers).
    """
    soup = BeautifulSoup(html, 'html.parser')
    indexlist = soup.select('.board-index')  # ranking number
    imagelist = soup.select('img[class="board-img"]')  # cover image
    movienamelist = soup.select('p[class="name"] > a[data-act="boarditem-click"]')
    moviestarlist = soup.select('p[class="star"]')
    releasetimelist = soup.select('p[class="releasetime"]')
    scorelist = soup.select('p[class="score"]')
    resultlist = []
    for i in range(len(indexlist)):
        resultdict = {
            'index': indexlist[i].text,
            # cover URL is lazily loaded, so it lives in data-src, not src
            'image': imagelist[i].attrs['data-src'],
            'title': movienamelist[i].text.strip(),
            # BUGFIX: the old lstrip('主演:') removed any of those
            # characters, not the literal prefix — exact removal instead.
            'actor': _strip_label(moviestarlist[i].text.strip(), '主演:'),
            'time': _strip_label(releasetimelist[i].text.strip(), '上映时间:'),
            'score': scorelist[i].text,
        }
        writeToFile(resultdict)
        resultlist.append(resultdict)
    return resultlist
def writeToFile(content):
    """Append one record to result.txt as a UTF-8 JSON line.

    ensure_ascii=False keeps Chinese text human-readable in the output
    file. The with-statement closes the file; the original's explicit
    f.close() inside the block was redundant and has been dropped.
    """
    with codecs.open('result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')
def delete_file_folder(src):
    """Recursively delete a file or directory tree, best-effort.

    Missing paths are a no-op; OS-level failures (permissions, races)
    are silently ignored so callers can use this as "ensure src is
    gone". BUGFIX: the original bare ``except:`` also swallowed
    KeyboardInterrupt/SystemExit — narrowed to OSError.
    """
    if os.path.isfile(src):
        try:
            os.remove(src)
        except OSError:
            pass  # best-effort: ignore permission errors / races
    elif os.path.isdir(src):
        # empty the directory first, then remove it
        for item in os.listdir(src):
            delete_file_folder(os.path.join(src, item))
        try:
            os.rmdir(src)
        except OSError:
            pass
def main(offset):
    """Scrape and parse one board page for the given numeric offset.

    BUGFIX: the original built 'offset=0' + str(offset), producing
    offset=00, offset=010, ... — every page except the first was the
    wrong URL. The offset value is now appended to 'offset=' directly.
    """
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    parseOnePage(getOnePage(url))
if __name__ == '__main__':
    # Start from a clean output file so repeated runs don't append
    # duplicate records.
    delete_file_folder('result.txt')
    # Fetch the 10 board pages (offset 0, 10, ..., 90) in parallel.
    pool = Pool(processes=10)
    pool.map(main, [i * 10 for i in range(10)])
    # BUGFIX: shut the pool down cleanly so workers are reaped before
    # the interpreter exits (the original leaked the pool).
    pool.close()
    pool.join()
'''