from bs4 import BeautifulSoup
import requests
start_url = 'http://bj.ganji.com/wu/'
url_host = 'http://bj.ganji.com'
def get_index_url(url):
    wb_data = requests.get(url)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    links = soup.select('.fenlei > dt > a')
    for link in links:
        page_url = url_host + link.get('href')
        print(page_url)

get_index_url(start_url)
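
# A variant (hypothetical, not in the original): return the channel URLs as a
# list instead of printing them, so channel_list below could be built
# programmatically rather than pasted in by hand.
def collect_index_urls(url):
    wb_data = requests.get(url)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    # same selector as get_index_url above
    return [url_host + link.get('href') for link in soup.select('.fenlei > dt > a')]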
channel_list = '''
http://bj.ganji.com/jiaju/
http://bj.ganji.com/rirongbaihuo/
http://bj.ganji.com/shouji/
http://bj.ganji.com/shoujihaoma/
http://bj.ganji.com/bangong/
http://bj.ganji.com/nongyongpin/
http://bj.ganji.com/jiadian/
http://bj.ganji.com/ershoubijibendiannao/
http://bj.ganji.com/ruanjiantushu/
http://bj.ganji.com/yingyouyunfu/
http://bj.ganji.com/diannao/
http://bj.ganji.com/xianzhilipin/
http://bj.ganji.com/fushixiaobaxuemao/
http://bj.ganji.com/meironghuazhuang/
http://bj.ganji.com/shuma/
http://bj.ganji.com/laonianyongpin/
http://bj.ganji.com/xuniwupin/
http://bj.ganji.com/qitawupin/
http://bj.ganji.com/ershoufree/
http://bj.ganji.com/wupinjiaohuan/
'''
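
# channel_list.split() (used in main below) turns this triple-quoted block
# into a plain Python list of channel URLs, one entry per line.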
import time
from page_parsing import url_list

# Progress monitor: report every 5 seconds how many item links are stored.
while True:
    print(url_list.count_documents({}))  # replaces .find().count(), removed in PyMongo 4
    time.sleep(5)
from multiprocessing import Pool
from page_parsing import get_item_info_from, url_list, item_info, get_links_from
from channel_extracing import channel_list

# Diff the two collections: links already collected minus links already parsed.
db_urls = [item['url'] for item in url_list.find()]
index_urls = [item['url'] for item in item_info.find()]
x = set(db_urls)
y = set(index_urls)
rest_of_urls = x - y

def get_all_links_from(channel):
    # walk up to 100 list pages of one channel with spider 1
    for i in range(1, 100):
        get_links_from(channel, i)

if __name__ == '__main__':
    pool = Pool(processes=6)
    pool.map(get_all_links_from, channel_list.split())
    pool.close()
    pool.join()
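
# Second pass (a sketch only; implied by the rest_of_urls diff above but not
# spelled out here): map spider 2 over the item URLs that are collected but
# not yet parsed, using the same pool pattern.
#
# if __name__ == '__main__':
#     pool = Pool(processes=6)
#     pool.map(get_item_info_from, rest_of_urls)
#     pool.close()
#     pool.join()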
from bs4 import BeautifulSoup
import requests
import time
import pymongo
import random
client = pymongo.MongoClient('localhost', 27017)
ganji = client['ganji']
url_list = ganji['url_list']
item_info = ganji['item_info']
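
# Optional hardening (an assumption, not part of the original setup): a unique
# index on 'url' makes MongoDB reject duplicate item links instead of storing
# them twice when spider 1 revisits a page.
# url_list.create_index('url', unique=True)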
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36',
    'Connection': 'keep-alive',
}
# http://cn-proxy.com/
proxy_list = [
    'http://117.177.250.151:8081',
    'http://111.85.219.250:3129',
    'http://122.70.183.138:8118',
]
proxy_ip = random.choice(proxy_list)  # pick a proxy IP at random
proxies = {'http': proxy_ip}
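
# Note that proxy_ip is chosen once at import time, so every request in this
# process shares one proxy. A per-request variant (a sketch, reusing the same
# proxy_list) would look like:
def get_proxies():
    # pick a fresh proxy for each call
    return {'http': random.choice(proxy_list)}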
# spider 1: collect item links from a channel's list pages
def get_links_from(channel, pages, who_sells='o'):
    # e.g. http://bj.ganji.com/ershoubijibendiannao/o3/
    # who_sells: 'o' for personal sellers, 'a' for merchants
    list_view = '{}{}{}/'.format(channel, who_sells, pages)
    wb_data = requests.get(list_view, headers=headers, proxies=proxies)
    soup = BeautifulSoup(wb_data.text, 'lxml')
    if soup.find('ul', 'pageLink'):  # a pager exists, so this page has listings
        for link in soup.select('.fenlei dt a'):
            item_link = link.get('href')
            url_list.insert_one({'url': item_link})
            print(item_link)
    else:
        # no pager: we have walked past the channel's last page
        pass
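
# Example call, mirroring the sample URL in the comment above:
# get_links_from('http://bj.ganji.com/ershoubijibendiannao/', 3)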
# spider 2: parse one item's detail page and store the record
def get_item_info_from(url, data=None):
    wb_data = requests.get(url, headers=headers)
    if wb_data.status_code == 404:
        pass  # listing has been taken down
    else:
        soup = BeautifulSoup(wb_data.text, 'lxml')
        data = {
            'title': soup.title.text.strip(),
            'price': soup.select('.f22.fc-orange.f-type')[0].text.strip(),
            'pub_date': soup.select('.pr-5')[0].text.strip().split(' ')[0],
            'area': list(map(lambda x: x.text, soup.select('ul.det-infor > li:nth-of-type(3) > a'))),
            'cates': list(soup.select('ul.det-infor > li:nth-of-type(1) > span')[0].stripped_strings),
            'url': url,
        }
        print(data)
        item_info.insert_one(data)  # was commented out; main's rest_of_urls diff needs item_info populated
get_item_info_from('http://bj.ganji.com/ershoubijibendiannao/1927955773x.htm')