A Small Baidu Tieba Crawler in Python 3

A crawler for the Huawei tieba (forum)

import urllib.request
from bs4 import BeautifulSoup
import csv
import time
import random

# track the total running time
start_time = time.time()

# save the results to a CSV file (append mode)
csvFile = open(r"E:\Python\Projects\贴吧\华为\huawei.csv", 'a+', newline='')
writer = csv.writer(csvFile)
writer.writerow(('posting_num', 'posting_title', 'posting_coments_num', 'posting_user_link', 'posting_user_name'))

# pn increases by 50 per page; roughly 6940 pages in total
base_url = 'http://tieba.baidu.com/f?kw=%E5%8D%8E%E4%B8%BA&ie=utf-8&pn='
posting_num = 1  # counter for how many posts have been scraped
for page in range(0, 6942):  # 6942 pages in total
    time_delay = random.randint(1, 3)  # random delay so Baidu is less likely to block us for crawling too fast
    url = base_url + str(page * 50)
    html = urllib.request.urlopen(url)
    bsObj = BeautifulSoup(html, 'lxml')
    posting_list = bsObj.find_all('div', {'class': 't_con cleafix'})  # each block holds a post's title, reply count and author

    print('============================')
    print('Scraping page %d of the Huawei tieba' % page)
    now_time = time.time()
    has_spent_seconds = now_time - start_time
    has_spent_time_int = int((now_time - start_time) / 60)
    print('The crawler has been running for %d minutes' % has_spent_time_int)
    if page > 1:
        # estimate the remaining time from the average time spent per page so far
        will_need_time = ((6940 - page) * has_spent_seconds / page) / 60
        will_need_time = int(will_need_time)
        print('About %d minutes of crawling remain' % will_need_time)
    # look up posting_coments_num and the other fields for each post on this page
    for posting in posting_list:
        try:
            # reply count
            posting_coments_num = posting.contents[1].span.contents[0]

            # author name
            posting_user_name = posting.contents[3].span.contents[1].a.contents[0]

            # author profile link
            posting_user_link = 'http://tieba.baidu.com' + posting.contents[3].span.contents[1].a.attrs['href']

            # post title
            posting_title = posting.contents[3].contents[1].contents[1].a.attrs['title']

            # one more post scraped
            posting_num = posting_num + 1

            # write the record to the CSV file
            writer.writerow((posting_num, posting_title, posting_coments_num, posting_user_link, posting_user_name))

        except Exception:
            # skip posts whose markup does not match the expected structure
            continue

    # pause between pages
    time.sleep(time_delay)
    # take an extra 3-second break every 10 pages
    if page in range(1, 6940, 10):
        time.sleep(3)


# close the CSV file after all pages have been visited
csvFile.close()

end_time = time.time()
duration_time = int((end_time - start_time)/60)
print('The script ran for %d minutes' % duration_time)

The crawler got blocked by Baidu after about 6,000 pages. You can point it at a different tieba instead, for example the Xiaomi bar or some entertainment bar, and set the page count to something below 6,000; that should avoid the ban. With the code as written, crawling 6,000 pages took me 180 minutes. The result is shown in the screenshot below: 403 Forbidden, blocked by Baidu.
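If you do want to retarget the crawler, here is a minimal sketch (not part of the original script) of how the list URL could be built for another forum and the page limit lowered. The Xiaomi keyword, the 5,000-page cap, and the User-Agent value are only illustrative assumptions; sending a browser-like User-Agent is a common mitigation, not a guarantee against a 403.

import urllib.parse
import urllib.request

# hypothetical example: target a different forum (here 小米 / Xiaomi)
forum_keyword = '小米'
base_url = 'http://tieba.baidu.com/f?kw=' + urllib.parse.quote(forum_keyword) + '&ie=utf-8&pn='

max_pages = 5000  # stay below the ~6000 pages where the ban kicked in

# a browser-like User-Agent header (the original script sends none)
headers = {'User-Agent': 'Mozilla/5.0'}
for page in range(0, max_pages):
    url = base_url + str(page * 50)  # pn still advances by 50 per page
    request = urllib.request.Request(url, headers=headers)
    html = urllib.request.urlopen(request)
    # ... parse and save exactly as in the full script above ...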

[Screenshot: the 403 Forbidden page returned by Baidu (QQ截图20160910073601.png)]

What was collected is already plenty: the CSV file is 39 MB and holds about 300,000 post records.
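If you want to sanity-check the output yourself, a quick optional sketch (not part of the original script) that counts the rows in the generated CSV; the path matches the one used above, and the count includes the header rows written on each run:

import csv

# hypothetical check: count the rows the crawler wrote to the CSV
with open(r"E:\Python\Projects\贴吧\华为\huawei.csv", newline='') as f:
    row_count = sum(1 for _ in csv.reader(f))
print('rows in huawei.csv: %d' % row_count)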
Code file download: https://pan.baidu.com/s/1i4H4MTB (password: uwb7)
