一、网页采集器
# Fetch the Baidu homepage while presenting a desktop-browser User-Agent,
# then show the headers that were actually sent.
import requests

url = 'https://www.baidu.com/'
request_headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
# Pass the headers dict along with the request so the server sees a browser UA.
res = requests.get(url=url, headers=request_headers)
# Inspect the request headers that went out on the wire.
print(res.request.headers)
# 设置访问服务器的时候,可以自己加上客户端的版本
python-requests/2.31.0
Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1
Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36
# Search Baidu for the keyword "coco", sending a browser User-Agent and a
# session Cookie so the server returns the real result page.
#
# Fix: the original 'Cookie' value was one string literal broken across two
# physical lines (a SyntaxError). It is rejoined here with implicit
# adjacent-literal concatenation, preserving the exact original value.
import requests

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
    # NOTE(review): this hard-coded session cookie will expire; refresh it
    # from the browser (or use requests.Session) when requests start failing.
    'Cookie': (
        'BIDUPSID=F8F101FACAC168981180241787208F13; PSTM=1691297885; BAIDUID=6AB9F35C494C731DA8867F8B086C6A49:SL=0:NR=10:FG=1; BD_UPN=12314753; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; MSA_WH=390_844; H_WISE_SIDS_BFESS=131861_213345_214804_110085_244725_261723_236312_265881_266354_267074_264354_268031_269904_269051_271172_271269_270102_234295_234207_272282_263618_272466_272472_260335_273165_273149_273233_273389_273896_274140_273788_274422_274571_263750_275097_275235_269286_270538_275796_275011_275941_275854_276089_276121_276196_276311_276586_276590_276965_276767_276400_253022_277076_276211_277268_272333_276830_277354_277383_277236_251972_276454_271253_273981_277628_277636_275189_275258_270292_277784_275732_272318_276925_276665_277884_277951_278017_277997_259642_278058_278166_278163_277936_278248_277320_278335_278396_278414_274784_275167_278277_278451_278533_278572_278576_278636_274576_278703_278514_278803_277541_278791_278388_256739_8000054_8000133_8000143_8000149_8000157_8000159_8000164_8000172_8000178_8000185_8000203_8000208; BA_HECTOR=8181018hak81agak2ka52l8q1iifm5q1o; ZFY=YLPvl91jw6HJIzGcSO0JR2h3wLMgXmBaPJZ1IscIOCU:C; BAIDUID_BFESS=6AB9F35C494C731DA8867F8B086C6A49:SL=0:NR=10:FG=1; '
        'H_WISE_SIDS=131861_213345_214804_110085_244725_261723_236312_265881_266354_267074_264354_268031_269904_269051_271172_270102_234295_234207_272282_263618_272466_272472_260335_273165_273149_273233_273389_274140_273788_274422_274571_263750_275097_275235_269286_270538_275011_275941_275854_276121_276196_276311_276586_276590_276965_276767_253022_277076_277268_276830_277354_277383_277236_251972_276454_271253_273981_277628_277636_275189_275258_270292_277784_275732_272318_276925_276665_277884_277951_278017_277997_259642_278058_278166_278163_278248_277320_278335_278396_278414_274784_275167_278277_278451_278533_278572_278576_278636_274576_278703_278514_278803_277541_278791_278388_256739_278955_278962_278923_274283_279020_279044_277523_276423_279135_278365_279266_279278_8000054_8000133_8000143_8000149_8000157_8000159_8000164_8000172_8000178_8000185_8000203_8000208; plus_lsv=3965f6be7add0277; plus_cv=1::m:f3ed604d; Hm_lvt_12423ecbc0e2ca965d84259063d35238=1697112448; SE_LAUNCH=5%3A28285207; rsv_i=c132uGBME4eVr3KPa86uEiPMGS01ZQuEQV3bAINXlwQKrsxMsdKFAMlsjOld2gSahhSDzZTb9t3L6wQyQb9SyhfAawDOV7Y; Hm_lpvt_12423ecbc0e2ca965d84259063d35238=1697112697; BD_CK_SAM=1; PSINO=6; H_PS_PSSID=39221_39398_39396_39418_39414_39436_39481_39308_39463_39233_39403_39467_26350_39421; delPer=0; B64_BOT=1; H_PS_645EC=3af8bU0wredNi%2Boxpwa5YU6SEhTgyzPNJvfTjy3HBH0IGPsphkELgL5iaVY; baikeVisitId=1aad0187-d1f5-4f6b-bc78-483231b5230c; COOKIE_SESSION=251_1_8_9_6_7_1_0_6_4_0_0_236_0_3_0_1697109193_1697108341_1697109190%7C9%2386939_194_1697108337%7C9'
    ),
}
# Query-string parameters: /s?wd=coco
p = {
    'wd': 'coco'
}
# Target: Baidu's search endpoint.
url = 'https://www.baidu.com/s'
# Send the request; `params` appends the query string to the URL.
res = requests.get(url, headers=headers, params=p)
res.encoding = 'utf-8'  # decode the body as UTF-8 before reading .text
print(res.text)
二、豆瓣电影
# Douban movie chart.
#
# Scraping with requests is not always "what you see is what you get":
# some data is not served at the address-bar URL but loaded dynamically
# through a separate request. Use the browser dev-tools network panel to
# locate the real (JSON) endpoint, as done here.
import requests

url = 'https://movie.douban.com/j/chart/top_list'
head = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
# Query parameters for the chart endpoint.
query_params = {
    'type': 24,
    'interval_id': '100:90',
    'action': '',
    'start': 0,
    'limit': '10',
}
res = requests.get(url, headers=head, params=query_params)
# The endpoint returns a JSON array of movie dicts, e.g. [{...}, {...}].
# Iterate it and read fields by key (dicts are indexed by key, not position).
for movie in res.json():
    print(movie['title'], movie['score'])
三、地址采集
# KFC store locator: the site posts a search form to this endpoint and
# receives JSON back, so we POST the same form directly.
#
# Fix: the inner loop previously reused the outer loop variable name `i`,
# shadowing the page counter; it only worked because the outer `for`
# reassigns `i` each iteration. Distinct names are used here.
import requests

url = 'https://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword'
for page in range(1, 6):  # pages 1 through 5
    # Form payload carried in the POST body (requests' `data=` parameter).
    form = {
        'cname': '',
        'pid': '',
        'keyword': '长沙',
        'pageIndex': page,  # changes on every loop iteration
        'pageSize': 10,     # 10 stores per page
    }
    # This endpoint expects a POST with form-encoded data.
    res = requests.post(url, data=form)
    page_text = res.json()  # dict; the store list lives under 'Table1'
    # page_text['Table1'] is a list of store dicts.
    for store in page_text['Table1']:
        print(f"餐厅名称:{store['storeName']},餐厅地址:{store['addressDetail']}")
四、cookie
# Xueqiu hot-posts API.
#
# Fix: removed the large commented-out variant that pasted a hard-coded
# (expiring) Cookie header into a plain requests.get; the live code below
# uses a Session so cookies are acquired and re-sent automatically.
import requests

url = 'https://xueqiu.com/statuses/hot/listV2.json?since_id=-1&max_id=550023&size=15'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36'}
# A Session object handles cookies automatically across requests.
session = requests.Session()
# Warm-up request: visit the homepage first so the server issues the
# session cookies the API endpoint requires; the Session stores them.
session.get('https://xueqiu.com/', headers=headers)
# The stored cookies are sent automatically with this API call.
page_text = session.get(url, headers=headers).json()
print(page_text)