Python爬取花瓣网美女图片(动态网站)

摘要: 利用python, requests和正则表达式爬取花瓣网美女标签全部图片(代码中虽导入了parsel, 但实际解析使用的是re正则)

# -*- coding: utf-8 -*-

'''
python 2.7.12
'''

import requests
from parsel import Selector
import time
import re, random, os


def scraw_pin_ids():
    """Collect pin ids from huaban.com's 'beauty' board via its JSON API.

    Pages through the board repeatedly, feeding the last pin id seen back
    as the 'max' cursor of the next request, until the server returns an
    empty page.

    Returns:
        set: unique pin ids as returned by the API.
    """
    pin_ids = []
    # Seed cursor: paging starts from pins older than this known id.
    pin_id = '1068018182'
    url = "http://huaban.com/favorite/beauty/"
    # These headers make the endpoint answer JSON instead of HTML.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36',
        'Accept': 'application/json',
        'X-Request': 'JSON',
        'X-Requested-With': 'XMLHttpRequest',
    }

    failures = 0
    while True:
        params = {
            'j0l4lymf': '',
            'max': pin_id,   # cursor: fetch pins older than this id
            'limit': '20',
            'wfl': '1',
        }
        try:
            response = requests.get(url, params=params, headers=headers)
            pins = response.json()['pins']
        except (requests.RequestException, ValueError, KeyError):
            # Network hiccup or malformed payload: retry a bounded number
            # of times. The original bare `except: continue` could loop
            # forever and also hid genuine programming errors.
            failures += 1
            if failures >= 10:
                break
            continue
        failures = 0
        if not pins:
            break
        for pin in pins:
            pin_ids.append(pin['pin_id'])
            print(pin['pin_id'])
            time.sleep(0.001)  # be gentle with the server
        pin_id = pin_ids[-1]  # advance the paging cursor
    return set(pin_ids)

def scraw_urls(pin_ids):
    """Fetch each pin page and extract the image file keys embedded in it.

    Args:
        pin_ids: iterable of pin ids (anything str() accepts).

    Returns:
        set: unique image keys; prefix with 'http://img.hb.aicdn.com/'
        to obtain a downloadable image URL.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
    }
    # Compiled once, outside the loop (the original recompiled it per page).
    key_pattern = re.compile(r'"key":"(.*?)"', re.S)

    urls = []
    for pin_id in pin_ids:
        page_url = 'http://huaban.com/pins/' + str(pin_id) + '/'
        try:
            response = requests.get(page_url, headers=headers)
        except requests.RequestException:
            # Skip unreachable pages; keep the catch narrow so real bugs
            # (e.g. NameError) are not silently swallowed as before.
            continue
        items = key_pattern.findall(response.text)
        urls.extend(items)
        print(items)
        print('============================================================================================================')
    return set(urls)

def download(urls):
    """Download each image key in `urls` into <file_path>/huaban as N.jpg.

    Relies on the module-level `file_path` for the destination root.
    Responses of 40000 bytes or less are skipped (thumbnails/error pages).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
    }
    # Create the target directory once, up front. The original re-checked
    # it on every iteration and mutated the process-wide CWD via os.chdir,
    # a global side effect unrelated code could trip over.
    target_dir = os.path.join(file_path, "huaban")
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    n = 1
    for key in set(urls):
        image_url = 'http://img.hb.aicdn.com/' + key
        try:
            r = requests.get(image_url, headers=headers)
        except requests.RequestException:
            continue  # unreachable image: move on to the next key
        # Filter out tiny responses (placeholders, thumbnails, errors).
        if len(r.content) > 40000:
            # `with` closes the file; the old explicit f.close() inside
            # the with-block was redundant.
            with open(os.path.join(target_dir, str(n) + ".jpg"), 'wb') as f:
                f.write(r.content)
            print(u"第" + str(n) + u"张图片下载成功")
            n += 1

# Image storage root. Raw string keeps the backslashes literal: '\s' and
# '\p' are invalid escape sequences and raise warnings on modern Pythons.
file_path = r'E:\selfprogress\programming\project\huaban'

if __name__ == '__main__':
    # Run the pipeline only when executed as a script, so importing this
    # module no longer kicks off a full crawl as a side effect.
    pin_ids = scraw_pin_ids()
    urls = scraw_urls(pin_ids)
    download(urls)
最后编辑于
©著作权归作者所有,转载或内容合作请联系作者
平台声明:文章内容(如有图片或视频亦包括在内)由作者上传并发布,文章内容仅代表作者本人观点,简书系信息发布平台,仅提供信息存储服务。

推荐阅读更多精彩内容

  • 声明:本文讲解的实战内容,均仅用于学习交流,请勿用于任何商业用途! 一、前言 强烈建议:请在电脑的陪同下,阅读本文...
    Bruce_Szh阅读 12,788评论 6 28
  • 长这么大,还是第一次来上海。之前没有一定要来的理由,既没有充足的消费能力,也无特别想逛的景点。因事第一次来,竟也喜...
    榕树下的童话阅读 283评论 0 0
  • A1:简述之前的目标是如何定的,最后的结果是怎么样? ①每个月定好的月度销售目标到了月底都不能完成。 ②因为没有如...
    4aa227d4084b阅读 112评论 1 0
  • 【师北辰一块听听写作课】 上周刚刚写了写作宣言——007践行宣言,本以为自己对写作动机或者说写作意义想的比较清楚了...
    封兄胖胖熊007阅读 332评论 0 1