Takeaway: trying to take shortcuts by copy-pasting other people's code wholesale doesn't work. You have to go through it step by step yourself, actually run the code, and search Baidu for solutions where things go wrong. For example, today I figured out that a crawler needs to set a User-Agent header so it can pretend to be a browser when visiting the page it wants to scrape.
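As a minimal sketch of that User-Agent point (the URL here is just a stand-in, not taken from the articles below): many sites reject or redirect the default `python-requests` identity, so sending a browser-style User-Agent string is often the first fix.

```
import requests

# Stand-in target URL, used only for illustration.
url = "https://search.51job.com/"

# Without this header, requests identifies itself as "python-requests/x.y.z",
# which many sites block or answer with an anti-bot page.
headers = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/70.0.3538.67 Safari/537.36"
    ),
}

response = requests.get(url, headers=headers, timeout=5)
print(response.status_code)  # 200 if the site accepted the "browser" request
```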
The first snippet is from: 前程无忧爬虫实战(通过输入关键字爬取任意职位并自动保存为.csv文本) - 简书
```
import csv
import re

import requests
from lxml import etree

headers = {
    "cache-control": "no-cache",
    "postman-token": "72a56deb-825e-3ac3-dd61-4f77c4cbb4d8",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36",
}


def get_url(key1):
    """Yield the search-result URL for every page of the given keyword."""
    try:
        i = 0
        url = "https://search.51job.com/list/000000,000000,0000,00,9,99,{},2,1.html"
        response = requests.get(url.format(key1), headers=headers)
        html = etree.HTML(response.content.decode('gbk'))
        # The total page count is embedded in the <span class="td"> text.
        max_page = int("".join(re.findall(r'(\d+)', "".join(html.xpath("//span[@class='td']/text()")))))
        while True:
            i += 1
            url = "https://search.51job.com/list/000000,000000,0000,00,9,99,{},2,{}.html"
            url = url.format(key1, i)
            print("*" * 100)
            print("Crawling page %d" % i)
            print("*" * 100)
            yield url
            if max_page == i:
                break
    except Exception:
        print("Could not get the page URL; error handled")


def parse_page(key1):
    """Request each result page and yield one dict per job listing."""
    try:
        for url in get_url(key1):
            response = requests.get(url, headers=headers)
            # The site serves GBK-encoded bytes, so decode them with gbk before
            # parsing; the decoding must match the encoding the server used.
            html = etree.HTML(response.content.decode('gbk'))
            rows = html.xpath("//div[@id='resultList']//div[@class='el']")
            for row in rows:
                item = {}
                item["公司"] = "".join(row.xpath("./span[@class='t2']/a/text()")).replace('\r\n', '').replace(' ', '')
                item["职位"] = "".join(row.xpath("./p/span/a/text()")).replace('\r\n', '').replace(' ', '')
                item["工作地点"] = "".join(row.xpath("./span[@class='t3']/text()")).replace('\r\n', '').replace(' ', '')
                item["薪资"] = "".join(row.xpath("./span[@class='t4']/text()")).replace('\r\n', '').replace(' ', '')
                item["发布时间"] = "".join(row.xpath("./span[@class='t5']/text()")).replace('\r\n', '').replace(' ', '')
                yield item
    except Exception:
        print("Bad response data; error handled")


def save_csv(key1):
    """Write all scraped listings to <keyword>前程无忧职位信息.csv."""
    try:
        header = ['公司', '职位', '工作地点', '薪资', '发布时间']
        # Open the file once in 'w' (write) mode; the header row goes in first,
        # then every listing is appended through the same writer.
        with open(key1 + '前程无忧职位信息.csv', 'w', newline='') as f:
            writer = csv.DictWriter(f, header)
            writer.writeheader()
            for item in parse_page(key1):
                writer.writerow(item)
    except Exception:
        print("Error while saving data; error handled")


if __name__ == '__main__':
    key1 = input('Enter the job title to crawl: ')
    save_csv(key1)
```
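One caveat about the CSV script above: it calls `open()` without an explicit encoding, so the file is written in the platform default (on Windows, the local code page). If the Chinese columns come out garbled when the file is opened in Excel, a common fix (my own suggestion, not something from the original article) is to write with `utf-8-sig`, whose BOM lets Excel detect UTF-8:

```
import csv

header = ['公司', '职位', '工作地点', '薪资', '发布时间']
sample = {'公司': '某某科技', '职位': '数据分析师', '工作地点': '上海',
          '薪资': '1-1.5万/月', '发布时间': '04-18'}  # made-up example row

# 'utf-8-sig' prepends a BOM, which Excel uses to recognize UTF-8
# and display the Chinese values correctly.
with open('demo.csv', 'w', newline='', encoding='utf-8-sig') as f:
    writer = csv.DictWriter(f, header)
    writer.writeheader()
    writer.writerow(sample)
```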
The second snippet is from: python入门015---python爬取前程无忧51job的职位信息并存入mysql数据库... - 简书
```
import re        # regular-expression matching
import requests  # HTTP requests
import xlwt      # create an Excel file and write data into it

# The job keyword to search for
key = '数据分析师'


# Fetch the raw HTML of one result page
def get_content(page):
    headers = {'Host': 'search.51job.com',
               'Upgrade-Insecure-Requests': '1',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}
    url = 'http://search.51job.com/list/000000,000000,0000,00,9,99,' + key + ',2,' + str(page) + '.html'
    # headers must be passed as a keyword argument; passed positionally it
    # lands in the params slot and is never sent as headers
    r = requests.get(url, headers=headers, timeout=5)
    r.encoding = 'gbk'  # the page is served as GBK
    html = r.text
    return html


# Extraction rules
def get(html):
    reg = re.compile(
        r'class="t1 ">.*? <a target="_blank" title="(.*?)".*? <span class="t2"><a target="_blank" title="(.*?)".*?<span class="t3">(.*?)</span>.*?<span class="t4">(.*?)</span>.*? <span class="t5">(.*?)</span>',
        re.S)  # re.S lets '.' match newlines too
    items = re.findall(reg, html)
    return items


def excel_write(items, index):
    # Write the scraped items into the worksheet
    for item in items:  # one tuple per job listing
        for i in range(0, 5):
            ws.write(index, i, item[i])  # row, column, value
        print(index)
        index += 1


newTable = "test.xls"  # workbook file name
wb = xlwt.Workbook(encoding='utf-8')  # create the Excel file, declaring the encoding
ws = wb.add_sheet('sheet1')  # create the worksheet
# Header order follows the regex capture groups: t1 = job title, t2 = company
headData = ['职位', '公司', '地址', '薪资', '日期']
for colnum in range(0, 5):
    ws.write(0, colnum, headData[colnum], xlwt.easyxf('font: bold on'))  # row, column
# Crawl pages 1-161; change the 162 to however many pages you want
for each in range(1, 162):
    index = (each - 1) * 50 + 1  # each result page holds 50 listings
    excel_write(get(get_content(each)), index)
wb.save(newTable)  # save the data to the Excel file
```
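To make the regex approach in `get()` concrete, here is a self-contained toy run against a hand-written HTML fragment. The fragment is mine, simplified from what the result page apparently looked like, so treat it as an illustration of the matching logic rather than the site's actual markup:

```
import re

# Made-up fragment mimicking one listing on a 51job result page.
html = '''
<p class="t1 "> <a target="_blank" title="数据分析师">数据分析师</a></p>
 <span class="t2"><a target="_blank" title="某某科技有限公司">某某科技</a></span>
<span class="t3">上海</span><span class="t4">1-1.5万/月</span>
 <span class="t5">04-18</span>
'''

reg = re.compile(
    r'class="t1 ">.*? <a target="_blank" title="(.*?)"'
    r'.*? <span class="t2"><a target="_blank" title="(.*?)"'
    r'.*?<span class="t3">(.*?)</span>'
    r'.*?<span class="t4">(.*?)</span>'
    r'.*? <span class="t5">(.*?)</span>',
    re.S)  # re.S so the lazy .*? runs can span the newlines between tags

print(re.findall(reg, html))
# [('数据分析师', '某某科技有限公司', '上海', '1-1.5万/月', '04-18')]
```

Each pair of parentheses becomes one element of the tuple, which is why `excel_write` can index `item[0]` through `item[4]` for the five columns.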