【Python爬虫】爬取专题所有文章和提交次数

import requests,csv
from lxml import etree


#获取总页数
# Determine total page count, then crawl every listing page.
def get_number(url):
    """Fetch the collection's landing page, derive how many listing pages
    exist from the article counter, crawl each page, and write the
    per-author submission stats to the CSV writer.

    Relies on module-level globals set in ``__main__``:
    ``numbers_url`` (paged URL template), ``pd`` (author -> title list),
    ``writer`` (csv writer).

    :param url: URL of the collection landing page.
    """
    # timeout stops a dead server from hanging the whole crawl
    res = requests.get(url, timeout=10).text
    select = etree.HTML(res)
    # The info div reads like "123篇文章 · 456人关注"; the part before the
    # '·' separator carries the article count.
    text = select.xpath('//div[@class="info"]/text()')[0]
    count_text = text.split('·')[0]
    number = int(OnlyCharNum(count_text))
    # 10 articles per listing page -> ceil(number / 10) pages.
    # (Original used an equivalent but convoluted modulo branch.)
    pages = -(-number // 10)
    for i in range(1, pages + 1):
        get_article(numbers_url.format(i))
    print(pd)
    for author, titles in pd.items():
        writer.writerow((author, len(titles), titles))


def get_article(url):
    """Scrape one listing page and record each (author, title) pair into
    the module-level ``pd`` dict (author -> list of submitted titles).

    :param url: URL of one paged article listing.
    """
    res = requests.get(url, timeout=10).text
    select = etree.HTML(res)
    names = select.xpath('//a[@class="blue-link"]/text()')
    titles = select.xpath('//a[@class="title"]/text()')
    # zip tolerates mismatched list lengths; the original indexed with
    # range(len(names)) and raised IndexError if titles came up short.
    for name, title in zip(names, titles):
        pd.setdefault(name, []).append(title)

#只取数字
# Keep only the digit characters of a string.
def OnlyCharNum(s, oth=''):
    """Return only the ASCII digits of *s*, in their original order.

    Bug fix: the original lowercased *s* into ``s2``, iterated ``s2``,
    but called ``s.replace(c, '')`` with the lowercased character --
    so uppercase letters were never stripped (e.g. "A1" stayed "A1"
    and the later ``int()`` call would crash).

    :param s: input string, possibly containing non-digit characters.
    :param oth: unused; kept for backward interface compatibility.
    :return: string of the digits found in *s* (may be empty).
    """
    return ''.join(ch for ch in s if ch in '0123456789')

if __name__ == '__main__':
    # author -> list of submitted article titles; filled by get_article.
    pd = {}
    # newline='' prevents the csv module emitting blank rows on Windows;
    # the context manager guarantees the file is closed (the original
    # opened with 'w+' and never closed or read the handle).
    with open('zhuanti.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(('简书名', '次数', '提交的title集合'))

        baseurl = 'http://www.jianshu.com/c/1b31f26b6af0'
        numbers_url = 'http://www.jianshu.com/c/1b31f26b6af0?order_by=added_at&page={}'
        get_number(baseurl)
屏幕快照 2017-08-09 下午8.23.03.png
最后编辑于
©著作权归作者所有,转载或内容合作请联系作者
平台声明:文章内容(如有图片或视频亦包括在内)由作者上传并发布,文章内容仅代表作者本人观点,简书系信息发布平台,仅提供信息存储服务。

推荐阅读更多精彩内容