import requests
from bs4 import BeautifulSoup


def trade_spider(max_pages):
    page = 1
    while page <= max_pages:
        url = "https://buckysroom.org/trade/search.php?page=" + str(page)
        # a plain GET request is enough here, no custom headers needed
        source_code = requests.get(url)
        plain_text = source_code.text
        # BeautifulSoup objects are easy to search through
        soup = BeautifulSoup(plain_text, "html.parser")
        for link in soup.find_all('a', {'class': 'item-name'}):
            href = "https://buckysroom.org" + link.get('href')
            title = link.string  # just the text, not the HTML
            print(href)
            print(title)
            # get_single_item_data(href)
        page += 1


def get_single_item_data(item_url):
    source_code = requests.get(item_url)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text, "html.parser")
    # if you want to gather information from that page
    for item_name in soup.find_all('div', {'class': 'i-name'}):
        print(item_name.string)
    # if you want to gather links for a web crawler
    for link in soup.find_all('a'):
        href = "https://buckysroom.org" + link.get('href')
        print(href)


trade_spider(1)
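In the listing above, the call to get_single_item_data(href) inside trade_spider is left commented out; enabling it turns the link printer into a two-level crawler that also visits each item page. Below is a minimal sketch of that variant, relying on the imports and get_single_item_data defined above. The function name trade_spider_deep is made up for illustration, and it assumes the tutorial's markup classes ('item-name' anchors, 'i-name' divs) are unchanged.

    # Sketch: same listing logic as trade_spider, but each discovered
    # item page is also fetched and printed via get_single_item_data.
    def trade_spider_deep(max_pages):
        page = 1
        while page <= max_pages:
            url = "https://buckysroom.org/trade/search.php?page=" + str(page)
            soup = BeautifulSoup(requests.get(url).text, "html.parser")
            for link in soup.find_all('a', {'class': 'item-name'}):
                href = "https://buckysroom.org" + link.get('href')
                print(href, link.string)
                get_single_item_data(href)  # follow the link and print its details
            page += 1

    # trade_spider_deep(1)  # crawl page 1 and every item page linked from it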
Python Programming Tutorial - 25 - How to Make a Web Crawler