ifeng.com (凤凰网) Category Spider

1. Environment: PyCharm + Python 2.7 + the Scrapy framework

2. Project Development

2.1 Create the project

scrapy startproject ifengdata

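The command generates the standard Scrapy project skeleton, roughly as follows (this is the default startproject template; later Scrapy versions also add a middlewares.py):

ifengdata/
    scrapy.cfg
    ifengdata/
        __init__.py
        items.py
        pipelines.py
        settings.py
        spiders/
            __init__.py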

2.2 Define the fields you need in items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class IfengdataItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # Title and URL of the top-level (parent) category
    parentTitle = scrapy.Field()
    parentUrls = scrapy.Field()

    # Title and URL of the sub-category
    subTitle = scrapy.Field()
    subUrls = scrapy.Field()

    # Storage path for the sub-category directory
    subFilename = scrapy.Field()

    # Article links under the sub-category
    sonUrls = scrapy.Field()

    # Article headline and body text
    head = scrapy.Field()
    content = scrapy.Field()
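
Scrapy items behave like dictionaries restricted to the declared fields. A quick interactive sanity check (the field values below are made-up examples, not real data from the site):

from ifengdata.items import IfengdataItem

item = IfengdataItem(parentTitle=u'资讯', parentUrls='http://news.ifeng.com/')
print(item['parentTitle'])   # dict-style access to a declared field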

2.3 Create the spider's entry file under the spiders subdirectory; I name it ifeng.py here

# -*- coding: utf-8 -*-
import os

import scrapy

from ifengdata.items import IfengdataItem


class IfengSpider(scrapy.Spider):
    name = 'ifeng'
    allowed_domains = ["ifeng.com"]
    start_urls = [
        "http://www.ifeng.com/daohang/"
    ]
    def parse(self, response):

        items = []
        # All parent-category titles and URLs
        parentUrls = response.xpath('//div[@class="col3"]/h2/a/@href').extract()
        parentTitle = response.xpath('//div[@class="col3"]/h2/a/text()').extract()

        # All sub-category URLs and titles
        subUrls = response.xpath('//div[@class="col3"]/div/div/div/div/ul/li/a/@href').extract()
        subTitle = response.xpath('//div[@class="col3"]/div/div/div/div/ul/li/a/text()').extract()
        for i in range(0, len(parentTitle)):
            # Directory path and name for this parent category
            parentFilename = "./Data/" + parentTitle[i]

            # Create the directory if it does not exist
            if not os.path.exists(parentFilename):
                os.makedirs(parentFilename)
            for j in range(0, len(subTitle)):
                item = IfengdataItem()
                item['parentUrls'] = parentUrls[i]
                item['parentTitle'] = parentTitle[i]
                # Check whether the sub-category URL starts with its parent-category
                # URL (e.g. sports.sina.com.cn vs sports.sina.com.cn/nba); True if so
                if_belong = subUrls[j].startswith(item['parentUrls'])
                # If it belongs to this parent category, nest its storage
                # directory under the parent's directory
                if if_belong:
                    subFilename = parentFilename + '/' + subTitle[j]
                    # Create the directory if it does not exist
                    if not os.path.exists(subFilename):
                        os.makedirs(subFilename)

                    # Store the sub-category url, title and filename fields
                    item['subUrls'] = subUrls[j]
                    item['subTitle'] = subTitle[j]
                    item['subFilename'] = subFilename
                    items.append(item)
        # Send a Request for each sub-category URL; the Response, along with
        # its meta data, is handed to the second_parse callback
        for item in items:
            yield scrapy.Request(url=item['subUrls'], meta={'meta_1': item}, callback=self.second_parse)

    def second_parse(self, response):
        # Pull the meta data carried over from parse() out of the Response
        meta_1 = response.meta['meta_1']
        # Collect every link on the sub-category page
        sonUrls = response.xpath('//a/@href').extract()

        items = []
        for i in range(0, len(sonUrls)):
            # Check whether each link starts with the parent-category URL and
            # ends with .shtml (an article page); True only if both hold
            if_belong = sonUrls[i].startswith(meta_1['parentUrls']) and sonUrls[i].endswith('.shtml')

            # If it belongs to this parent category, bundle the field values
            # into a single item so they travel together
            if if_belong:
                item = IfengdataItem()
                item['parentTitle'] = meta_1['parentTitle']
                item['parentUrls'] = meta_1['parentUrls']
                item['subUrls'] = meta_1['subUrls']
                item['subTitle'] = meta_1['subTitle']
                item['subFilename'] = meta_1['subFilename']
                item['sonUrls'] = sonUrls[i]
                items.append(item)
        # Send a Request for each article link; the Response, along with its
        # meta data, is handed to the detail_parse callback
        for item in items:
            yield scrapy.Request(url=item['sonUrls'], meta={'meta_2': item}, callback=self.detail_parse)

    # Parse the article page: extract the headline and body text
    def detail_parse(self, response):
        item = response.meta['meta_2']
        head = response.xpath('//h1[@id="artical_topic"]/text()').extract()
        content_list = response.xpath('//div[@id="main_content"]/p/text()').extract()
        # Concatenate the text of all <p> tags into one string
        content = "".join(content_list)
        # extract() returns a list, so test it for emptiness; fall back to an
        # empty string when no headline is found
        if head:
            item['head'] = head[0]
            print(item['head'])
        else:
            item['head'] = ''
        item['content'] = content

        yield item
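
Before running the full crawl, the XPath selectors are easy to spot-check interactively with Scrapy's built-in shell (standard tooling; the output list is elided here):

scrapy shell "http://www.ifeng.com/daohang/"
>>> response.xpath('//div[@class="col3"]/h2/a/text()').extract()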

2.4 Configure the pipeline (pipelines.py)

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

import pymysql


class IfengdataPipeline(object):
    def __init__(self):
        # A MongoDB backend (pymongo.MongoClient plus a collection handle,
        # with the host/port/db names taken from settings) would work equally
        # well here; this version writes to MySQL instead.
        self.conn = pymysql.connect(
            host='localhost',
            port=3306,
            user='root',
            password='root',
            db='test',
            charset='utf8',
        )
        self.cursor = self.conn.cursor()
    def process_item(self, item, spider):
        item = dict(item)
        # Insert the parent-category title (mulu) only if it is not already
        # present; cursor.execute() returns the number of matching rows
        sql = 'select * from ifengdata4 WHERE mulu=%s'
        par = [item['parentTitle']]
        name = self.cursor.execute(sql, par)
        if not name:
            sql1 = 'insert into ifengdata4(id,mulu) VALUES(null,%s)'
            params = [item['parentTitle']]
            self.cursor.execute(sql1, params)
            self.conn.commit()
        # Same dedup-then-insert pattern for the sub-category title (zimulu)
        sql4 = 'select * from ifengdata5 WHERE zimulu=%s'
        par4 = [item['subTitle']]
        name = self.cursor.execute(sql4, par4)
        if not name:
            sql2 = 'insert into ifengdata5(id,zimulu) VALUES(null,%s)'
            params2 = [item['subTitle']]
            self.cursor.execute(sql2, params2)
            self.conn.commit()
        # Article headlines are inserted unconditionally
        sql3 = 'insert into ifengdata6(id,title) VALUES(null,%s)'
        params3 = [item['head']]
        self.cursor.execute(sql3, params3)
        self.conn.commit()
        return item
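
process_item assumes the three tables ifengdata4, ifengdata5 and ifengdata6 already exist in the test database. A minimal one-off helper to create them might look like the sketch below; only the column names are dictated by the INSERT statements above, while the types and widths are my assumption:

# create_tables.py -- one-off helper; the schema is inferred from the
# pipeline's INSERT statements, and the column types are an assumption
import pymysql

conn = pymysql.connect(host='localhost', port=3306, user='root',
                       password='root', db='test', charset='utf8')
cursor = conn.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS ifengdata4 ("
               "id INT AUTO_INCREMENT PRIMARY KEY, mulu VARCHAR(255))")
cursor.execute("CREATE TABLE IF NOT EXISTS ifengdata5 ("
               "id INT AUTO_INCREMENT PRIMARY KEY, zimulu VARCHAR(255))")
cursor.execute("CREATE TABLE IF NOT EXISTS ifengdata6 ("
               "id INT AUTO_INCREMENT PRIMARY KEY, title VARCHAR(255))")
conn.commit()
conn.close()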

2.5 Configure settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for ifengdata project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'ifengdata'

SPIDER_MODULES = ['ifengdata.spiders']
NEWSPIDER_MODULE = 'ifengdata.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'ifengdata (+http://www.yourdomain.com)'
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'ifengdata.middlewares.IfengdataSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'ifengdata.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'ifengdata.pipelines.IfengdataPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# MongoDB host (loopback address 127.0.0.1)
# MONGODB_HOST = '127.0.0.1'
# MongoDB port, 27017 by default
# MONGODB_PORT = 27017
# Database name
# MONGODB_DBNAME = 'IFeng'
# Collection that stores this crawl's data
# MONGODB_DOCNAME = 'IFengData'

# Throttle: wait one second between requests to the same site
DOWNLOAD_DELAY = 1

# Redis connection info; only relevant if the project is later switched to
# scrapy-redis for distributed crawling
REDIS_HOST = "192.168.13.23"
REDIS_PORT = 6379
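
2.6 Run the spider

From the project root, start the crawl by spider name; the optional -o flag additionally exports every yielded item to a JSON file for quick inspection:

scrapy crawl ifeng -o ifeng.json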

Give it a try; if you run into problems, leave a comment.
