Scraping Dayu (大鱼号) article statistics with Python

# -*- coding: utf-8 -*-
"""
Author : soliton/念旧
Email  : soliton.wang@gmail.com
QQ     : 1670829014
"""


from tkinter import *
import asyncio
import random
from pyppeteer.launcher import launch
from lxml import etree
import time
import xlwt
import os
root = Tk()
root.title('大鱼号')
root.minsize(400, 500)
root.maxsize(400, 500)
# JS injected into the page: override navigator.webdriver so the site's
# headless-browser detection is less likely to trigger
webdriver_js = '''() =>{
           Object.defineProperties(navigator,{
             webdriver:{
               get: () => false
             }
           })
        }
'''

login_url = 'https://mp.dayu.com'
filename = r'大鱼号.xls'
file_list_zone = []  # collected rows for the xls export: [title, publish time, read count]
a = []  # sentinel list: li() appends 1 when it reaches posts older than the target month


def input_random():
    # random per-keystroke delay in milliseconds, to make typing look human
    return random.randint(50, 80)


def li(page_html, text_time):
    # Parse one list page; collect every article whose publish month equals
    # text_time (format YYYY-MM) into file_list_zone and re-save the workbook.
    tree = etree.HTML(page_html)
    for number_li in range(1, 11):  # each list page shows up to 10 articles
        behot_time_xpath = "/html/body/div[1]/div[3]/div/div[2]/div/div[3]/ul/" + "li[{}]/".format(
            number_li) + "div[1]/div[2]/p/span[1]/text()"
        behot_time = ''.join(tree.xpath(behot_time_xpath))
        if behot_time[:7] == text_time:
            one_list = []
            title_xpath = "/html/body/div[1]/div[3]/div/div[2]/div/div[3]/ul/" + "li[{}]/".format(
                number_li) + "div[1]/div[2]/h3/a/text()"
            title = ''.join(tree.xpath(title_xpath))
            read_xpath = "/html/body/div[1]/div[3]/div/div[2]/div/div[3]/ul/" + "li[{}]/".format(
                number_li) + "div[1]/div[2]/p/span[2]/span/text()"
            read_a = ''.join(tree.xpath(read_xpath))
            # keep only the ASCII digits of the read count
            read_b = ''.join(filter(str.isalnum, read_a))
            read = ''.join(filter(lambda c: ord(c) < 256, read_b))
            one_list.append(title)
            one_list.append(behot_time)
            one_list.append(read)
            file_list_zone.append(list(one_list))
            time.sleep(1)
            # rewrite the whole workbook after every matched row so partial
            # progress survives a crash mid-crawl
            f = xlwt.Workbook()
            sheet1 = f.add_sheet(u'sheet1', cell_overwrite_ok=True)
            i = 0
            for data in file_list_zone:
                for j in range(len(data)):
                    sheet1.write(i, j, data[j])
                i = i + 1
            f.save(filename)
            print('OK')
            a.append(0)
        elif behot_time[:7] < text_time:
            # lexicographic compare is safe for zero-padded YYYY-MM strings;
            # comparing only the month slice [5:7] would break across years
            print("爬取完成")
            a.append(1)


class Display:
    def __init__(self):
        self.username = Label(root, text='用户账号')
        self.username.pack()
        self.username_entry_var = StringVar()
        self.username_entry = Entry(root, textvariable=self.username_entry_var)
        self.username_entry.pack()
        self.password = Label(root, text='用户密码')
        self.password.pack()
        self.password_entry_var = StringVar()
        self.password_entry = Entry(root, textvariable=self.password_entry_var, show='*')
        self.password_entry.pack()
        self.content_time = Label(root, text='爬取统计的 年-月')
        self.content_time.pack()
        self.content_time_entry_var = StringVar()
        self.content_time_entry = Entry(root, textvariable=self.content_time_entry_var)
        self.content_time_entry.pack()
        self.Login_button = Button(root, text='爬取统计内容', command=self.run)
        self.Login_button.pack()
        self.label = Label(root, text='     ')
        self.Tips = Label(root, text='Tips:获取的年月份-例子:2020-07\n爬取完成后会在当前目录中生成的大鱼号.xls文件\n软件未响应即是在爬取内容生成xls文件中,请勿关闭本软件\n   需使用Google浏览器,否则不能使用     BY: soliton')
        self.Tips.pack()
        self.label.pack()
        # entry values are read at click time in login_dayu(), not captured here
        self.text = Text(root)
        self.text.pack()

    async def login_dayu(self):
        params = {
            "headless": True,  # run Chrome headless; set False to watch it while debugging
            "dumpio": True,  # pipe browser output, which keeps the browser from stalling
            "args": [
                '--disable-infobars',  # hide the automation info bar
                '--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
            ],
        }
        page = None
        try:
            random_number = random.randrange(1000, 3000, 300)
            browser = await launch(**params)
            pages = await browser.pages()
            page = pages[0]
            await page.setViewport({'width': 900, 'height': 768})
            await page.setUserAgent(
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36")
            await page.evaluate(webdriver_js)
            await page.goto(login_url)
            await page.evaluate(webdriver_js)  # re-apply the webdriver patch after navigation
            iframe = page.frames[1]  # the login form lives in an iframe
            await iframe.evaluate(webdriver_js)  # patches applied outside the iframe do not reach inside it
            await iframe.type("#login_name", self.username_entry.get(), {"delay": input_random()})
            await iframe.waitFor(500)
            await iframe.type("#password", self.password_entry.get(), {"delay": input_random()})
            # drag the slider captcha a random distance to the right
            await iframe.hover("#nc_1_n1z")
            await page.mouse.down()
            await page.mouse.move(random_number, 0, {'delay': input_random(), "steps": 25})
            await page.mouse.up()
            await page.waitFor(1000)
            await iframe.click("#submit_btn")
            self.text.insert(END, '\n登录成功' + self.username_entry.get())
            self.text.update()
            await page.waitFor(5000)
            # click through the site menus to the published-article list
            await page.click('ul > li:nth-child(6)')
            await page.waitFor(800)
            await page.click('#w-menu-contents_publish')
            await page.waitFor(500)
            await page.click('div.contents-publish-article_type > div > ul > li:nth-child(2)')
            await page.waitFor(300)
            self.text.insert(END, '\n获取统计数据,爬取中……')
            self.text.update()
        except Exception:
            self.text.delete('1.0', END)
            self.text.insert(END, '请检查账号和密码是否错误再重新点击爬取统计内容按钮')
            if page:
                await page.close()
            return  # do not fall through to the paging loop with a broken page

        page_number = 0
        while 1 not in a:  # li() appends 1 once it reaches posts older than the target month
            page_html = await page.content()
            self.text.insert(END, '\n开始爬取第' + str(page_number + 1) + '页')
            self.text.update()
            li(page_html, text_time=self.content_time_entry.get())
            # the pager gains an extra element after the first few pages,
            # which shifts the position of the "next page" button
            if page_number >= 5:
                await page.click('div.contents-publish-article_content > div > div > ul > li:nth-child(11)')
            else:
                await page.click('div.contents-publish-article_content > div > div > ul > li:nth-child(10)')
            await page.waitFor(2000)
            page_number += 1
        self.text.insert(END, "\n爬取完成")
        await page.close()
        self.text.insert(END, '\n正在生成大鱼号.xls文件中...')
        a.clear()

    def run(self):
        # run_until_complete blocks the Tk mainloop, so the window appears
        # "not responding" while the crawl runs (see the Tips label)
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.login_dayu())


if __name__ == '__main__':
    D = Display()
    if os.path.exists(filename):
        os.remove(filename)
    mainloop()

The script above breaks down into a few parts: the UI framework, simulated login, simulated pagination, page parsing with XPath extraction, and writing the xls file. It is easy to follow but not very rigorous; if you can write a more rigorous version, please post it in the comments below and I will update the code accordingly. Web scraping is only a hobby for me.
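As one small step toward rigor: the YYYY-MM string slicing in li() only works while the site's timestamps stay zero-padded, so a more defensive version could parse real dates instead. A minimal sketch, assuming publish times look like '2020-07-15 12:30' (the compare_month helper below is my illustration, not part of the original script):

from datetime import datetime

def compare_month(behot_time, text_time):
    # Illustrative helper, not part of the original script.
    # Classify a publish time against the target YYYY-MM month.
    # Returns 'match', 'older' (stop paging), or 'newer' (keep skipping).
    published = datetime.strptime(behot_time[:10], '%Y-%m-%d')
    target = datetime.strptime(text_time, '%Y-%m')
    published_month = (published.year, published.month)
    target_month = (target.year, target.month)
    if published_month == target_month:
        return 'match'
    return 'older' if published_month < target_month else 'newer'

li() could then branch on the returned label instead of slicing strings, and a malformed or empty timestamp would raise a clear ValueError instead of silently comparing garbage.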
