Python实战计划学习笔记(3)服务器网页解析

基本步骤

  1. 服务器与本地的交换机制
  • 请求:get,post,head,put,options,connect,trace,delete
    GET /page_one.html HTTP/1.1
    Host: www.sample.com
  • 回应:status_code, 网页内容
  2. 解析真实网页获取数据的办法

练习1代码

from bs4 import BeautifulSoup
import requests
# Scrape one TripAdvisor attractions listing page and print one dict per item.
url = 'https://cn.tripadvisor.com/Attractions-g60763-Activities-New_York_City_New_York.html'
web_data = requests.get(url)
soup = BeautifulSoup(web_data.text, 'lxml')
titles = soup.select('div.property_title > a[target="_blank"]')
# Fixed: CSS attribute selectors are written img[width="160"]; the original
# 'img["width=160"]' is not valid selector syntax (soupsieve rejects it).
imgs = soup.select('img[width="160"]')
cates = soup.select('div.p13n_reasoning_v2')

# zip truncates to the shortest list, so a page where one selector
# under-matches silently drops trailing items -- acceptable for a demo.
for title, img, cate in zip(titles, imgs, cates):
    data = {
        'title': title.get_text(),
        'img': img.get('src'),
        'cate': list(cate.stripped_strings),
    }
    print(data)

练习2代码

from bs4 import BeautifulSoup
import requests
# Forged browser identity plus a logged-in session cookie so the private
# "Saves" page is reachable.
# NOTE(review): these are live auth tokens captured from a browser session --
# never commit real credentials to a shared document.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    # Fixed: the cookie literal had been hard-wrapped across two physical
    # lines, which splits the string and is a SyntaxError; rejoined here via
    # implicit string-literal concatenation.
    'Cookie': 'ServerPool=B; TASSK=enc%3AJlqgR8kGqJx%2BQ5G1prQO4VrB7mIO388KW48MPK8YGrx3oNqxz4Qrivyzv1qAOmJq4wzYHg6Nnt8%3D; TAUnique=%1%enc%3A4gvUQzUyygu%2FGv6dv3X4cs0eZt5Hvxu2QJt2GYFC5BMrYJESy8uhPw%3D%3D; TART=%1%enc%3Avxr%2Bnb91%2BHKnfVAUrIqY9Rvc7dTzOqGqhbyXR%2Btwq8%2Ffn34Q98xKWNHL2kWDcnWEugT2h1gxo1I%3D; __gads=ID=53237a462a95aa19:T=1472095261:S=ALNI_MaslV_D02aAx6nI-TrYwNCRLdRJJA; CommercePopunder=SuppressAll*1472095465187; SecureLogin2=3.4%3AAKSSWi38zqX%2BT3fkhPOSQgr0%2BHVz2LPSSZT5FanVs%2BO9tX1QLwadr4feDj8yNpzz3uRopyCCn1UeieXMis6p3M12s0WhvKsBKWKxbi5iisSFJ4%2FB1hTiSJBywPkN2evAQV1fqvwLYjrYEIANCVhebx%2BoNZxBfn3Q1sQLgcxhOChP3dqYNr3RlT7JY7dmyyahZd3PL%2FygRCLK4VhJQpo0Rn0%3D; TAAuth3=3%3Ad211cc1d8ec2dbe465e6227d613e4b4f%3AANmawThSa8sZUNHt%2FPQhS4VfIohzg%2BI%2FXTEedryGnHGUxlf24WWO%2B4hIm9t%2FBy8QhhzEPyUBHPvIgrXBGUiUWk2zCazoznlOv%2BSbuPz7IjzR0J%2F1wA0Ij%2FB8csfM2j%2BWFYJOY6hMqy1gAlYgQxq8upgWUnnppTDLQ08pl4ldgCs%2FeY72l8FFJVqJjWbBbkcu3Mh8DKobkbyjHkVwYomJyNx2MGRcEwRclHlmIE%2B5M0fk; TATravelInfo=V2*AC.TYO*A.2*MG.-1*HP.2*FL.3*RVL.60763_237l105127_238*RS.1; TAReturnTo=%1%%2FAttraction_Review-g60763-d105127-Reviews-Central_Park-New_York_City_New_York.html; roybatty=AHPxj2pWAnbIF7xdTtIzIDHe3hrkF9avYgmphcOELIFfjx9L%2Fx0w1fK35vHYbPylGbzpQxRasDfzRQ%2F12Wrsp7F7rLNDaXOhee5ChmK5XW2%2FXL%2BlyUlaCvssO%2Fa3zWvSiwIUilKmrD%2FHtv2e%2BAlh1SJ7V14c5PlxUuJ6bCV6TiQj%2C1; NPID=; TASession=%1%V2ID.E9C70A2D8C9F87BA04BD09846AECE4B9*SQ.48*PR.427%7C*LS.Saves*GR.52*TCPAR.51*TBR.82*EXEX.78*ABTR.51*PPRP.62*PHTB.28*FS.25*CPU.71*HS.popularity*ES.popularity*AS.popularity*DS.5*SAS.popularity*FPS.oldFirst*TS.87561D8E010E069629F3426C834EF8AF*LF.zhCN*FA.1*DF.0*LP.%2F*FBH.2*MS.-1*RMS.-1*FLO.60763*TRA.true*LD.105127; '
              'CM=%1%HanaPersist%2C%2C-1%7Cpu_vr2%2C%2C-1%7Ct4b-pc%2C%2C-1%7CHanaSession%2C%2C-1%7CRCPers%2C%2C-1%7CWShadeSeen%2C%2C-1%7Cpu_vr1%2C%2C-1%7CFtrPers%2C%2C-1%7CHomeASess%2C1%2C-1%7CAWPUPers%2C%2C-1%7Ccatchsess%2C5%2C-1%7Cbrandsess%2C%2C-1%7Csesscoestorem%2C%2C-1%7CCCSess%2C%2C-1%7CViatorMCPers%2C%2C-1%7Csesssticker%2C%2C-1%7C%24%2C%2C-1%7Ct4b-sc%2C%2C-1%7CMC_IB_UPSELL_IB_LOGOS2%2C%2C-1%7Cb2bmcpers%2C%2C-1%7CMC_IB_UPSELL_IB_LOGOS%2C%2C-1%7Csess_rev%2C2%2C-1%7Csessamex%2C%2C-1%7Cperscoestorem%2C%2C-1%7CSaveFtrPers%2C%2C-1%7Cpers_rev%2C%2C-1%7CMetaFtrSess%2C%2C-1%7CRBAPers%2C%2C-1%7CWAR_RESTAURANT_FOOTER_PERSISTANT%2C%2C-1%7CFtrSess%2C%2C-1%7CHomeAPers%2C%2C-1%7C+r_lf_1%2C%2C-1%7CRCSess%2C%2C-1%7C+r_lf_2%2C%2C-1%7Ccatchpers%2C3%2C1472700068%7CAWPUSess%2C%2C-1%7Cvr_npu2%2C%2C-1%7Csh%2C%2C-1%7CLastPopunderId%2C104-771-null%2C-1%7Cpssamex%2C%2C-1%7C2016sticksess%2C%2C-1%7Cvr_npu1%2C%2C-1%7CCCPers%2C%2C-1%7CWAR_RESTAURANT_FOOTER_SESSION%2C%2C-1%7Cbrandpers%2C%2C-1%7Cb2bmcsess%2C%2C-1%7C2016stickpers%2C%2C-1%7CViatorMCSess%2C%2C-1%7CWarPopunder_Session%2C%2C-1%7CWarPopunder_Persist%2C%2C-1%7CTakeOver%2C%2C-1%7Cr_ta_2%2C%2C-1%7Cr_ta_1%2C%2C-1%7CSaveFtrSess%2C%2C-1%7CRBASess%2C%2C-1%7Cperssticker%2C%2C-1%7CMetaFtrPers%2C%2C-1%7C; TAUD=LA-1472095257697-1*LG-22093740-2.1.F.*LD-22093742-.....'
}
url_saves = 'https://cn.tripadvisor.com/Saves#52709824'
def get_favs(url_saves, data=None):
    """Fetch the TripAdvisor "Saves" page (needs the login cookie in the
    module-level ``headers``) and print a dict per saved location."""
    response = requests.get(url_saves, headers=headers)
    soup = BeautifulSoup(response.text, 'lxml')

    entries = zip(
        soup.select('a.location-name'),
        soup.select('img.photo_image'),
        soup.select('span.format_address'),
    )
    for name, photo, address in entries:
        data = {
            'title': name.get_text(),
            'img': photo.get('src'),
            'meta': list(address.stripped_strings),
        }
        print(data)

练习3代码

两个函数

from bs4 import BeautifulSoup
import requests

# Forged browser identity plus a logged-in session cookie so the private
# "Saves" page is reachable.
# NOTE(review): these are live auth tokens captured from a browser session --
# never commit real credentials to a shared document.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    # Fixed: the cookie literal had been hard-wrapped across two physical
    # lines, which splits the string and is a SyntaxError; rejoined here via
    # implicit string-literal concatenation.
    'Cookie': 'ServerPool=B; TASSK=enc%3AJlqgR8kGqJx%2BQ5G1prQO4VrB7mIO388KW48MPK8YGrx3oNqxz4Qrivyzv1qAOmJq4wzYHg6Nnt8%3D; TAUnique=%1%enc%3A4gvUQzUyygu%2FGv6dv3X4cs0eZt5Hvxu2QJt2GYFC5BMrYJESy8uhPw%3D%3D; TART=%1%enc%3Avxr%2Bnb91%2BHKnfVAUrIqY9Rvc7dTzOqGqhbyXR%2Btwq8%2Ffn34Q98xKWNHL2kWDcnWEugT2h1gxo1I%3D; __gads=ID=53237a462a95aa19:T=1472095261:S=ALNI_MaslV_D02aAx6nI-TrYwNCRLdRJJA; CommercePopunder=SuppressAll*1472095465187; SecureLogin2=3.4%3AAKSSWi38zqX%2BT3fkhPOSQgr0%2BHVz2LPSSZT5FanVs%2BO9tX1QLwadr4feDj8yNpzz3uRopyCCn1UeieXMis6p3M12s0WhvKsBKWKxbi5iisSFJ4%2FB1hTiSJBywPkN2evAQV1fqvwLYjrYEIANCVhebx%2BoNZxBfn3Q1sQLgcxhOChP3dqYNr3RlT7JY7dmyyahZd3PL%2FygRCLK4VhJQpo0Rn0%3D; TAAuth3=3%3Ad211cc1d8ec2dbe465e6227d613e4b4f%3AANmawThSa8sZUNHt%2FPQhS4VfIohzg%2BI%2FXTEedryGnHGUxlf24WWO%2B4hIm9t%2FBy8QhhzEPyUBHPvIgrXBGUiUWk2zCazoznlOv%2BSbuPz7IjzR0J%2F1wA0Ij%2FB8csfM2j%2BWFYJOY6hMqy1gAlYgQxq8upgWUnnppTDLQ08pl4ldgCs%2FeY72l8FFJVqJjWbBbkcu3Mh8DKobkbyjHkVwYomJyNx2MGRcEwRclHlmIE%2B5M0fk; TATravelInfo=V2*AC.TYO*A.2*MG.-1*HP.2*FL.3*RVL.60763_237l105127_238*RS.1; TAReturnTo=%1%%2FAttraction_Review-g60763-d105127-Reviews-Central_Park-New_York_City_New_York.html; roybatty=AHPxj2pWAnbIF7xdTtIzIDHe3hrkF9avYgmphcOELIFfjx9L%2Fx0w1fK35vHYbPylGbzpQxRasDfzRQ%2F12Wrsp7F7rLNDaXOhee5ChmK5XW2%2FXL%2BlyUlaCvssO%2Fa3zWvSiwIUilKmrD%2FHtv2e%2BAlh1SJ7V14c5PlxUuJ6bCV6TiQj%2C1; NPID=; TASession=%1%V2ID.E9C70A2D8C9F87BA04BD09846AECE4B9*SQ.48*PR.427%7C*LS.Saves*GR.52*TCPAR.51*TBR.82*EXEX.78*ABTR.51*PPRP.62*PHTB.28*FS.25*CPU.71*HS.popularity*ES.popularity*AS.popularity*DS.5*SAS.popularity*FPS.oldFirst*TS.87561D8E010E069629F3426C834EF8AF*LF.zhCN*FA.1*DF.0*LP.%2F*FBH.2*MS.-1*RMS.-1*FLO.60763*TRA.true*LD.105127; '
              'CM=%1%HanaPersist%2C%2C-1%7Cpu_vr2%2C%2C-1%7Ct4b-pc%2C%2C-1%7CHanaSession%2C%2C-1%7CRCPers%2C%2C-1%7CWShadeSeen%2C%2C-1%7Cpu_vr1%2C%2C-1%7CFtrPers%2C%2C-1%7CHomeASess%2C1%2C-1%7CAWPUPers%2C%2C-1%7Ccatchsess%2C5%2C-1%7Cbrandsess%2C%2C-1%7Csesscoestorem%2C%2C-1%7CCCSess%2C%2C-1%7CViatorMCPers%2C%2C-1%7Csesssticker%2C%2C-1%7C%24%2C%2C-1%7Ct4b-sc%2C%2C-1%7CMC_IB_UPSELL_IB_LOGOS2%2C%2C-1%7Cb2bmcpers%2C%2C-1%7CMC_IB_UPSELL_IB_LOGOS%2C%2C-1%7Csess_rev%2C2%2C-1%7Csessamex%2C%2C-1%7Cperscoestorem%2C%2C-1%7CSaveFtrPers%2C%2C-1%7Cpers_rev%2C%2C-1%7CMetaFtrSess%2C%2C-1%7CRBAPers%2C%2C-1%7CWAR_RESTAURANT_FOOTER_PERSISTANT%2C%2C-1%7CFtrSess%2C%2C-1%7CHomeAPers%2C%2C-1%7C+r_lf_1%2C%2C-1%7CRCSess%2C%2C-1%7C+r_lf_2%2C%2C-1%7Ccatchpers%2C3%2C1472700068%7CAWPUSess%2C%2C-1%7Cvr_npu2%2C%2C-1%7Csh%2C%2C-1%7CLastPopunderId%2C104-771-null%2C-1%7Cpssamex%2C%2C-1%7C2016sticksess%2C%2C-1%7Cvr_npu1%2C%2C-1%7CCCPers%2C%2C-1%7CWAR_RESTAURANT_FOOTER_SESSION%2C%2C-1%7Cbrandpers%2C%2C-1%7Cb2bmcsess%2C%2C-1%7C2016stickpers%2C%2C-1%7CViatorMCSess%2C%2C-1%7CWarPopunder_Session%2C%2C-1%7CWarPopunder_Persist%2C%2C-1%7CTakeOver%2C%2C-1%7Cr_ta_2%2C%2C-1%7Cr_ta_1%2C%2C-1%7CSaveFtrSess%2C%2C-1%7CRBASess%2C%2C-1%7Cperssticker%2C%2C-1%7CMetaFtrPers%2C%2C-1%7C; TAUD=LA-1472095257697-1*LG-22093740-2.1.F.*LD-22093742-.....'
}
url_saves = 'https://cn.tripadvisor.com/Saves#52709824'
url = 'https://cn.tripadvisor.com/Attractions-g60763-Activities-New_York_City_New_York.html'

def get_attractions(url, data=None):
    """Scrape one attractions listing page and print a dict per item.

    url  -- listing page URL (no login needed for the public listing).
    data -- unused input; reassigned per item (kept for signature compat).
    """
    web_data = requests.get(url)
    soup = BeautifulSoup(web_data.text, 'lxml')
    titles = soup.select('div.property_title > a[target="_blank"]')
    # Fixed: CSS attribute selectors are written img[width="160"]; the
    # original 'img["width=160"]' is not valid selector syntax.
    imgs = soup.select('img[width="160"]')
    cates = soup.select('div.p13n_reasoning_v2')
    for title, img, cate in zip(titles, imgs, cates):
        data = {
            'title': title.get_text(),
            'img': img.get('src'),
            'cate': list(cate.stripped_strings),
        }
        print(data)

def get_favs(url_saves, data=None):
    """Fetch the TripAdvisor "Saves" page (needs the login cookie in the
    module-level ``headers``) and print a dict per saved location."""
    response = requests.get(url_saves, headers=headers)
    soup = BeautifulSoup(response.text, 'lxml')

    entries = zip(
        soup.select('a.location-name'),
        soup.select('img.photo_image'),
        soup.select('span.format_address'),
    )
    for name, photo, address in entries:
        data = {
            'title': name.get_text(),
            'img': photo.get('src'),
            'meta': list(address.stripped_strings),
        }
        print(data)

# Run both scrapers: the public attractions listing, then the login-only saves page.
get_attractions(url)
get_favs(url_saves)

练习4代码

延时爬取所有条目

from bs4 import BeautifulSoup
import requests
import time

url = 'https://cn.tripadvisor.com/Attractions-g60763-Activities-New_York_City_New_York.html'
# Pagination offsets 30, 60, ..., 900 -- 30 listing pages in total.
urls = [
    'https://cn.tripadvisor.com/Attractions-g60763-Activities-oa{}-New_York_City_New_York.html#ATTRACTION_LIST'.format(offset)
    for offset in range(30, 930, 30)
]

def get_attractions(url, data=None):
    """Scrape one paginated attractions listing and print a dict per item.

    url  -- listing page URL.
    data -- unused input; reassigned per item (kept for signature compat).
    """
    web_data = requests.get(url)
    time.sleep(4)  # throttle between page fetches to avoid being blacklisted
    soup = BeautifulSoup(web_data.text, 'lxml')
    titles = soup.select('div.property_title > a[target="_blank"]')
    # Fixed: CSS attribute selectors are written img[width="160"]; the
    # original 'img["width=160"]' is not valid selector syntax.
    imgs = soup.select('img[width="160"]')
    cates = soup.select('div.p13n_reasoning_v2')
    for title, img, cate in zip(titles, imgs, cates):
        data = {
            'title': title.get_text(),
            'img': img.get('src'),
            'cate': list(cate.stripped_strings),
        }
        print(data)

# Crawl every paginated listing page; the sleep inside get_attractions
# spaces the requests out.
for single_url in urls:
    get_attractions(single_url)

练习5代码

伪装移动端爬取真实图片地址

from bs4 import BeautifulSoup
import requests

# Spoof an iPhone via the User-Agent header so the site serves its mobile
# page, whose markup exposes the real image URLs.
headers = {
    'User-Agent':'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1'
}
url='https://cn.tripadvisor.com/Attractions-g60763-Activities-New_York_City_New_York.html'

# Fetch the mobile listing; the lazy-load placeholder divs carry the real
# image URL in their data-thumburl attribute.
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.text, 'lxml')

names = soup.select('div.location')
thumbs = soup.select('div.thumb.thumbLLR.soThumb > div.missing.lazyMiss')

info = []
for name, thumb in zip(names, thumbs):
    data = {
        # [1:-1] drops the first/last character -- presumably surrounding
        # whitespace in the mobile markup; verify against a live page.
        'title': name.get_text()[1:-1],
        'img': thumb.get('data-thumburl'),
    }
    print(data)
    info.append(data)

print(info)
print('共有', len(info), '条记录')

运行结果


1.jpg
最后编辑于
©著作权归作者所有,转载或内容合作请联系作者
  • 序言:七十年代末,一起剥皮案震惊了整个滨河市,随后出现的几起案子,更是在滨河造成了极大的恐慌,老刑警刘岩,带你破解...
    沈念sama阅读 213,928评论 6 493
  • 序言:滨河连续发生了三起死亡事件,死亡现场离奇诡异,居然都是意外死亡,警方通过查阅死者的电脑和手机,发现死者居然都...
    沈念sama阅读 91,192评论 3 387
  • 文/潘晓璐 我一进店门,熙熙楼的掌柜王于贵愁眉苦脸地迎上来,“玉大人,你说我怎么就摊上这事。” “怎么了?”我有些...
    开封第一讲书人阅读 159,468评论 0 349
  • 文/不坏的土叔 我叫张陵,是天一观的道长。 经常有香客问我,道长,这世上最难降的妖魔是什么? 我笑而不...
    开封第一讲书人阅读 57,186评论 1 286
  • 正文 为了忘掉前任,我火速办了婚礼,结果婚礼上,老公的妹妹穿的比我还像新娘。我一直安慰自己,他们只是感情好,可当我...
    茶点故事阅读 66,295评论 6 386
  • 文/花漫 我一把揭开白布。 她就那样静静地躺着,像睡着了一般。 火红的嫁衣衬着肌肤如雪。 梳的纹丝不乱的头发上,一...
    开封第一讲书人阅读 50,374评论 1 292
  • 那天,我揣着相机与录音,去河边找鬼。 笑死,一个胖子当着我的面吹牛,可吹牛的内容都是我干的。 我是一名探鬼主播,决...
    沈念sama阅读 39,403评论 3 412
  • 文/苍兰香墨 我猛地睁开眼,长吁一口气:“原来是场噩梦啊……” “哼!你这毒妇竟也来了?” 一声冷哼从身侧响起,我...
    开封第一讲书人阅读 38,186评论 0 269
  • 序言:老挝万荣一对情侣失踪,失踪者是张志新(化名)和其女友刘颖,没想到半个月后,有当地人在树林里发现了一具尸体,经...
    沈念sama阅读 44,610评论 1 306
  • 正文 独居荒郊野岭守林人离奇死亡,尸身上长有42处带血的脓包…… 初始之章·张勋 以下内容为张勋视角 年9月15日...
    茶点故事阅读 36,906评论 2 328
  • 正文 我和宋清朗相恋三年,在试婚纱的时候发现自己被绿了。 大学时的朋友给我发了我未婚夫和他白月光在一起吃饭的照片。...
    茶点故事阅读 39,075评论 1 341
  • 序言:一个原本活蹦乱跳的男人离奇死亡,死状恐怖,灵堂内的尸体忽然破棺而出,到底是诈尸还是另有隐情,我是刑警宁泽,带...
    沈念sama阅读 34,755评论 4 337
  • 正文 年R本政府宣布,位于F岛的核电站,受9级特大地震影响,放射性物质发生泄漏。R本人自食恶果不足惜,却给世界环境...
    茶点故事阅读 40,393评论 3 320
  • 文/蒙蒙 一、第九天 我趴在偏房一处隐蔽的房顶上张望。 院中可真热闹,春花似锦、人声如沸。这庄子的主人今日做“春日...
    开封第一讲书人阅读 31,079评论 0 21
  • 文/苍兰香墨 我抬头看了看天上的太阳。三九已至,却和暖如春,着一层夹袄步出监牢的瞬间,已是汗流浃背。 一阵脚步声响...
    开封第一讲书人阅读 32,313评论 1 267
  • 我被黑心中介骗来泰国打工, 没想到刚下飞机就差点儿被人妖公主榨干…… 1. 我叫王不留,地道东北人。 一个月前我还...
    沈念sama阅读 46,934评论 2 365
  • 正文 我出身青楼,却偏偏与公主长得像,于是被迫代替她去往敌国和亲。 传闻我的和亲对象是个残疾皇子,可洞房花烛夜当晚...
    茶点故事阅读 43,963评论 2 351

推荐阅读更多精彩内容