import requests
from bs4 import BeautifulSoup
# A first attempt with a plain urllib.request got a 403 from the site;
# sending the browser-like headers below (copied from a Chrome session,
# Cookie included) gets a normal response.
headers = {
    'Accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en-US,en;q=0.9',
    'Cookie': 'ar_debug=1; APC=AfxxVi7OzB82gR35o-yPJfx6oZQlY0uZybSM72afJ9QXJ4NdlSQN6A; ',
    'Referer': 'https://541a78665b02f02c693bbe75addf8a7a.safeframe.googlesyndication.com/',
    'Sec-Ch-Ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
    'Sec-Ch-Ua-Mobile': '?0',
    'Sec-Ch-Ua-Platform': '"macOS"',
    'Sec-Fetch-Dest': 'image',
    'Sec-Fetch-Mode': 'no-cors',
    'Sec-Fetch-Site': 'cross-site',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
    'X-Client-Data': 'CJG2yQEIorbJAQipncoBCJ/jygEIk6HLAQiGoM0BCLnKzQEIks/NARj1yc0B',
}
page = requests.get('https://www.point2homes.com/CA/Real-Estate-Listings/ON/Toronto/Danforth-Village.html', headers=headers)
print(page.status_code)
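# If the headers stop working, this would turn a silent 403 into an exception:
# page.raise_for_status()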
soup = BeautifulSoup(page.text, 'html.parser')
print('bs find')
# Drill down to the <ul class="items"> that holds one <li> per listing.
search_results = soup.find('div', attrs={'id': 'search-results-list'})
listings = search_results.find('div', attrs={'class': 'listings'})
items = listings.find('ul', attrs={'class': 'items'})
# recursive=False keeps only direct <li> children; a plain find_all('li')
# would also pull the nested <li> elements (Beds, Baths) inside each listing.
li_list = items.find_all('li', recursive=False)
print(len(li_list))
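# The same drill-down as one CSS selector, where the child combinator (>)
# plays the role of recursive=False:
# li_list = soup.select('#search-results-list div.listings ul.items > li')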
for li in li_list:
    # Each listing <li> carries an address, bed/bath counts, and a price.
    try:
        print(li.find('div', attrs={'class': 'item-address'})['data-address'])
        print(li.find('li', attrs={'data-label': 'Beds'}).text)
        print(li.find('li', attrs={'data-label': 'Baths'}).text)
        print(li.find('div', attrs={'class': 'price'})['data-price'])
    except (AttributeError, TypeError, KeyError):
        # Not every <li> is a listing; skip the ones missing these fields.
        print('found none')
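# A variant that collects the fields instead of printing them (same
# selectors as above; handy if the listings should end up in a CSV later):
# rows = []
# for li in li_list:
#     addr = li.find('div', attrs={'class': 'item-address'})
#     beds = li.find('li', attrs={'data-label': 'Beds'})
#     baths = li.find('li', attrs={'data-label': 'Baths'})
#     if addr is not None and beds is not None and baths is not None:
#         rows.append({'address': addr['data-address'],
#                      'beds': beds.text, 'baths': baths.text})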
Questions that came up along the way:

The request got a 403. Fix: add the Cookie (and the rest of the browser
headers). Do I even need BeautifulSoup, though? My usual approach is to fetch
the page, write a hairy regex, and multiline-select in Sublime; that route
would look like the sketch below.
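A rough sketch of the regex route, reusing the page fetched above. The
data-address attribute is the one the bs4 code reads; the rest is
illustrative:

import re
# Pull every data-address attribute straight out of the raw HTML.
addresses = re.findall(r'data-address="([^"]*)"', page.text)
for addr in addresses:
    print(addr)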
bs4 turned out to be pretty simple: one find_all grabs every match.
One catch with find_all, though: it crosses nesting levels, so find_all('li')
also returns the <li> elements nested inside each listing <li>.
recursive=False (used above) fixes that; a self-contained demo is below.
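A minimal demo of the nesting problem and the fix, on a toy document:

from bs4 import BeautifulSoup

toy = BeautifulSoup(
    '<ul class="items"><li>listing<ul><li>Beds</li><li>Baths</li></ul></li></ul>',
    'html.parser')
ul = toy.find('ul', attrs={'class': 'items'})
print(len(ul.find_all('li')))                   # 3 -- nested <li> included
print(len(ul.find_all('li', recursive=False)))  # 1 -- direct child only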
-- Writing a guide is genuinely hard.