Notes 2

```python
# -*- coding: utf-8 -*-
"""
Created on Mon Jul  8 16:59:12 2019

@author: Administrator
"""


import requests
import re
from lxml import etree
import json
import time
import random
import collections
from pymongo import MongoClient


headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}


"""
# 提取json数据文本
pat_json = re.compile('</script><script id="js-initialData" type="text/json">(.*)</script><script src="https://static.zhihu.com/heifetz/vendor.6c16e03dca561b828324.js">')
response_json = re.findall(pat_json,response.text)[0]

# 解析json数据
json.loads(response_json)

# 取出关注人的账号
user_following = list(json.loads(response_json ).get('initialState')['entities']['users'].keys())[1:]

# 构造关注人follow页面网址
user_urls = ['https://www.zhihu.com/people/'+name+'/following' for name in user_following]

"""
# Crawl a single user's profile page
def spider(user_url):
    # Fetch the user's page with a browser User-Agent
    response = requests.get(user_url, headers=headers)
    # Sleep a random interval to avoid being flagged as a crawler
    time.sleep(random.randint(3, 6))
    # Locate the embedded JSON text
    pat_json = re.compile('</script><script id="js-initialData" type="text/json">(.*)</script><script src="https://static.zhihu.com/heifetz/vendor.6c16e03dca561b828324.js">')
    response_json = re.findall(pat_json, response.text)[0]
    # Parse the JSON and extract the accounts this user follows
    user_following = list(json.loads(response_json).get('initialState')['entities']['users'].keys())[1:]
    # Build the /following page URL for each followed user
    other_urls = ['https://www.zhihu.com/people/' + name + '/following' for name in user_following]
    # Build an XPath selector over the HTML
    selector = etree.HTML(response.text)
    # Extract the display name
    name = selector.xpath('//*[@class="ProfileHeader-name"]/text()')[0]
    # Extract the industry/position, if present
    try:
        profession = selector.xpath('//*[@class="ProfileHeader-infoItem"]/text()')[0]
    except IndexError:
        profession = ''
    # Extract the number of people this user follows
    try:
        following = selector.xpath('//*[@class="Card FollowshipCard"]/div/a[1]/div/strong/text()')[0]
    except IndexError:
        following = ''
    # Extract the number of followers
    try:
        follower = selector.xpath('//*[@class="Card FollowshipCard"]/div/a[2]/div/strong/text()')[0]
    except IndexError:
        follower = ''

    print(name)
    collection.insert_one({'name': name, 'profession': profession, 'following': following, 'follower': follower})
    return other_urls
       
    
    
# Queue of URLs waiting to be crawled
next_crawl_urls = collections.deque()
# Seed the queue with one starting URL before entering the loop
next_crawl_urls.append('https://www.zhihu.com/people/asdfghjkl-54-99/following')


# Connect to MongoDB and initialize the client
client = MongoClient()
# Create/select the database
db = client.zhihu
# Create/select the collection
collection = db.user
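# Optional hardening (my assumption, not in the original script): a unique
# index on 'name' would make MongoDB reject duplicate profile documents.
# collection.create_index('name', unique=True)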



# URLs that have already been crawled; a set gives O(1) dedup lookups
crawled_urls = set()
    
    
    
while True:
    # popleft removes and returns the next URL from the pending queue
    url = next_crawl_urls.popleft()
    try:
        other_urls = spider(url)

        crawled_urls.add(url)

        # Pending minus already-crawled leaves the URLs not yet visited
        no_crawl_url = set(other_urls) - crawled_urls

        # Push the unvisited URLs onto the pending queue
        next_crawl_urls.extend(no_crawl_url)
    except Exception:
        # Skip users whose pages fail to fetch or parse
        pass
    ```
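One caveat worth noting about the loop above: `set(other_urls) - crawled_urls` only removes URLs that have already been crawled, not URLs still waiting in `next_crawl_urls`, so the same profile can be enqueued (and fetched) several times. A minimal sketch of one way to tighten the dedup logic, reusing the `spider` function above (the extra `seen` set is my addition, not part of the original script):

```python
import collections

# Seed URL; everything ever enqueued goes into `seen` exactly once.
start = 'https://www.zhihu.com/people/asdfghjkl-54-99/following'
seen = {start}
next_crawl_urls = collections.deque([start])

while next_crawl_urls:
    url = next_crawl_urls.popleft()
    try:
        other_urls = spider(url)
    except Exception:
        continue  # skip profiles that fail to fetch or parse
    for other in other_urls:
        # Checking `seen` before enqueueing keeps duplicates out of the queue
        if other not in seen:
            seen.add(other)
            next_crawl_urls.append(other)
```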