import requests
from pyquery import PyQuery as pq
from requests.exceptions import RequestException
import csv
# Browser-like User-Agent so the target site serves the page instead of
# rejecting the request as an obvious bot/script client.
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'
}
def get_link_detail(url):
    """Fetch the ranking page at *url* and parse each table row into a dict.

    Returns:
        A list of dicts keyed by 'sort', 'name', 'city', 'score' and
        'indicator5'..'indicator9'. On any request failure an empty list
        is returned (instead of None) so callers can still iterate or
        write the result safely.
    """
    try:
        # Explicit timeout so a stalled server cannot hang the script forever.
        resp = requests.get(url, headers=HEADERS, timeout=10)
        # HTTPError is a RequestException subclass, so 4xx/5xx land in the
        # same handler as connection problems.
        resp.raise_for_status()
    except RequestException as e:
        print("发生错误了", e)
        return []
    # The page's declared charset is unreliable; sniff it from the payload.
    text = resp.content.decode(resp.apparent_encoding, 'ignore')
    html = pq(text)
    list_data = []
    for tr in html(".news-text table tbody tr").items():
        dict_data = {
            'sort': tr('td:nth-child(1)').text(),
            'name': tr('td:nth-child(2) div').text(),
            'city': tr('td:nth-child(3)').text(),
            'score': tr('td:nth-child(4)').text(),
        }
        # Columns 5-9 each carry a matching td.indicatorN class.
        for i in range(5, 10):
            dict_data['indicator%d' % i] = tr('td.indicator%d' % i).text()
        list_data.append(dict_data)
    return list_data
def write_data(data, path='data.csv'):
    """Write scraped ranking rows to a CSV file.

    Args:
        data: iterable of dicts keyed by the ranking field names. A falsy
            value (e.g. None from a failed scrape) writes just the header
            row instead of raising.
        path: output file path; defaults to 'data.csv' for backward
            compatibility with existing callers.
    """
    headers = ['sort', 'name', 'city', 'score', 'indicator5', 'indicator6',
               'indicator7', 'indicator8', 'indicator9']
    # newline='' lets the csv module manage line endings itself (avoids
    # blank lines between rows on Windows).
    with open(path, 'w', encoding="utf-8", newline='') as fp:
        writer = csv.DictWriter(fp, headers)
        writer.writeheader()
        writer.writerows(data or [])
if __name__ == '__main__':
    # Scrape the 2019 "best universities" ranking table and persist it
    # to CSV in the working directory.
    ranking_url = 'http://zuihaodaxue.com/zuihaodaxuepaiming2019.html'
    write_data(get_link_detail(ranking_url))
requests+pyquery+csv爬取最好大学排行榜
©著作权归作者所有,转载或内容合作请联系作者
- 文/潘晓璐 我一进店门,熙熙楼的掌柜王于贵愁眉苦脸地迎上来,“玉大人,你说我怎么就摊上这事。” “怎么了?”我有些...
- 文/花漫 我一把揭开白布。 她就那样静静地躺着,像睡着了一般。 火红的嫁衣衬着肌肤如雪。 梳的纹丝不乱的头发上,一...
- 文/苍兰香墨 我猛地睁开眼,长吁一口气:“原来是场噩梦啊……” “哼!你这毒妇竟也来了?” 一声冷哼从身侧响起,我...