2. Introduction to Web Crawlers and Their Technical Value
2-1: What is a web crawler?
2-2: What is the technical value of crawlers?
3. A Simple Crawler Architecture
3-1: The simple crawler architecture
3-2: Dynamic run flow of the simple crawler architecture:
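The run flow is: the scheduler asks the URL manager for a pending URL, hands it to the downloader, passes the downloaded page to the parser, feeds the newly found URLs back into the URL manager, and gives the parsed data to the outputer; the loop repeats until no pending URLs remain. Below is a minimal sketch of that loop (names and structure are illustrative assumptions; the full scheduler appears in section 7-2):

# A minimal sketch of the run loop; the four components (URL manager, downloader,
# parser, outputer) are passed in, mirroring the scheduler in section 7-2.
def run(root_url, urls, downloader, parser, outputer):
    urls.add_new_url(root_url)
    while urls.has_new_url():                     # 1. any pending URLs left?
        url = urls.get_new_url()                  # 2. take one pending URL
        page = downloader.download(url)           # 3. download the page
        new_urls, data = parser.parse(url, page)  # 4. parse out new URLs and new data
        urls.add_new_urls(new_urls)               # 5. feed new URLs back to the manager
        outputer.collect_data(data)               # 6. collect the parsed data
    outputer.output_html()                        # 7. output everything collected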
4. The URL Manager and How to Implement It
4-1 The URL manager
4-2 Ways to implement the URL manager
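Typical options are in-memory Python sets, a relational database table, or a cache such as Redis (stated here as general background, not restated from the notes). For a small crawl the in-memory approach is enough; a minimal sketch, which section 7-3 turns into a full class:

# A minimal sketch of the in-memory URL manager: two sets give cheap deduplication.
new_urls = set()   # URLs waiting to be crawled
old_urls = set()   # URLs already crawled

def add_new_url(url):
    # Only accept a URL that has been neither queued nor crawled yet
    if url and url not in new_urls and url not in old_urls:
        new_urls.add(url)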
5. The Web Page Downloader and the urllib2 Module
5.1 Introduction to the web page downloader
5.2 Three ways to download a page with urllib2
1. urlopen(): open the URL directly
import urllib2
# Send the request directly
response = urllib2.urlopen('http://www.baidu.com')
# Get the status code; 200 means the request succeeded
print response.getcode()
# Read the content
cont = response.read()
2. Adding data and an HTTP header
import urllib2
# Create a Request object
request = urllib2.Request(url)
# Add form data (add_data takes the urlencoded request body as a single string)
request.add_data('a=1')
# Add an HTTP header to make the request look like it comes from a browser
request.add_header('User-Agent', 'Mozilla/5.0')
# Send the request and get the response
response = urllib2.urlopen(request)
3. Adding handlers for special scenarios
import urllib2, cookielib
# Create a cookie container
cj = cookielib.CookieJar()
# Build an opener that processes cookies
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
# Install the opener so that urllib2 uses it
urllib2.install_opener(opener)
# Access the page with cookie-enabled urllib2
response = urllib2.urlopen("http://www.xxx.com")
6. **The Web Page Parser and the BeautifulSoup Third-Party Module**
6-1: Introduction to web page parsers
6-2: Introduction to the BeautifulSoup module and its installation
Used to extract data from HTML or XML documents; it is typically installed with pip (pip install beautifulsoup4).
6-3: BeautifulSoup syntax
Create the BeautifulSoup object:
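The notes only list the first step here; a minimal self-contained sketch of the syntax covered (create the object, search for nodes with find_all/find, then read each node's name, attributes, and text), using the same calls as the example in 6-4:

from bs4 import BeautifulSoup

# A toy document just for illustration
html_doc = '<html><body><a href="http://example.com" class="sister">Elsie</a></body></html>'

# 1. Create the BeautifulSoup object from the HTML string, naming the parser to use
soup = BeautifulSoup(html_doc, 'html.parser')
# 2. Search for nodes: find_all() returns every match, find() only the first
nodes = soup.find_all('a', class_='sister')
# 3. Access node information: tag name, attributes, and text
for node in nodes:
    print node.name, node['href'], node.get_text()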
6-4: A BeautifulSoup example test
import re
from bs4 import BeautifulSoup
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
soup = BeautifulSoup(html_doc,'html.parser',from_encoding='utf-8')
print 'Get all the links'
links = soup.find_all('a')  # find every <a> node
for link in links:
    print link.name, link['href'], link.get_text()
print "Get Lacie's link"
link_node = soup.find('a', href='http://example.com/lacie')  # find the first <a> node whose href matches exactly
print link_node.name, link_node['href'], link_node.get_text()
print 'Regex match'
link_node = soup.find('a', href=re.compile(r"ill"))  # find the first <a> node whose href matches the regex
print link_node.name, link_node['href'], link_node.get_text()
print 'Get the text of the p paragraph'
p_node = soup.find('p', class_="title")  # find the first <p> node whose class is "title"
print p_node.name, p_node.get_text()
7. **Hands-On Practice: Crawling Data from 1000 Baidu Baike Pages**
7-1: Crawler example: analyzing the target
Inspect the page elements to find the URL format of entry pages, the node that holds the title, and the node that holds the summary.
7-2: The scheduler: spider_main.py
#!/usr/bin/python
# -*- coding:utf-8 -*-
from baike_spider import url_manager
from baike_spider import html_downloader
from baike_spider import html_parser
from baike_spider import html_outputer

class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):  # the crawler's scheduler
        count = 1  # which URL is currently being crawled
        self.urls.add_new_url(root_url)
        try:
            while self.urls.has_new_url():  # while there are new URLs
                new_url = self.urls.get_new_url()  # take one new URL
                print 'craw %d : %s' % (count, new_url)  # report which URL this is
                html_cont = self.downloader.download(new_url)  # download the page
                new_urls, new_data = self.parser.parse(new_url, html_cont)  # parse the page, passing the current URL and the downloaded content
                self.urls.add_new_urls(new_urls)  # add the parsed URLs to the URL manager
                self.outputer.collect_data(new_data)  # collect the parsed data
                if count == 1000:
                    break
                count = count + 1
        except:
            print 'craw failed'  # exception handling
        self.outputer.output_html()  # write out the collected data

if __name__ == "__main__":
    root_url = "http://baike.baidu.com/view/21087.htm"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
7-3: The URL manager: url_manager.py
#!/usr/bin/python
# -*- coding:utf-8 -*-

class UrlManager(object):  # maintains two collections: URLs waiting to be crawled and URLs already crawled
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):  # add a single new URL to the manager
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):  # add URLs in batch
        if urls is None or len(urls) == 0:  # nothing to add
            return
        for url in urls:
            self.add_new_url(url)  # add them one by one

    def has_new_url(self):  # does the manager still hold URLs waiting to be crawled?
        return len(self.new_urls) != 0

    def get_new_url(self):  # take one URL that has not been crawled yet
        new_url = self.new_urls.pop()  # pop() returns and removes an element
        self.old_urls.add(new_url)
        return new_url
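A quick, illustrative check of how the manager behaves (not part of the course code; run it after the class above):

# Illustrative usage of UrlManager
manager = UrlManager()
manager.add_new_url("http://baike.baidu.com/view/21087.htm")
manager.add_new_url("http://baike.baidu.com/view/21087.htm")  # duplicate, silently ignored
print manager.has_new_url()   # True
print manager.get_new_url()   # returns the URL and moves it into old_urls
print manager.has_new_url()   # False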
7-4: The HTML downloader: html_downloader.py
#!/usr/bin/python
# -*- coding:utf-8 -*-
import urllib2

class HtmlDownloader(object):  # exposes a single download method
    def download(self, url):
        if url is None:
            return None
        response = urllib2.urlopen(url)
        if response.getcode() != 200:  # the request failed
            return None
        return response.read()
7-5: The HTML parser: html_parser.py
#!/usr/bin/python
# -*- coding:utf-8 -*-
import re
import urlparse
from bs4 import BeautifulSoup

class HtmlParser(object):  # takes a URL and its page content, returns new URLs and the parsed data
    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):  # collect the URLs of other entries
        new_urls = set()
        # the regex matches links of the form /view/123.htm
        links = soup.find_all('a', href=re.compile(r"/view/\d+\.htm"))  # find every matching <a> node
        for link in links:
            new_url = link['href']  # the relative link
            new_full_url = urlparse.urljoin(page_url, new_url)  # join it into a full URL
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):  # extract the data
        res_data = {}
        # url
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title"> <h1>Python</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data['title'] = title_node.get_text()
        # <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data
7-6: The HTML outputer: html_outputer.py
#!/usr/bin/python
# -*- coding:utf-8 -*-

class HtmlOutputer(object):
    def __init__(self):
        self.datas = []  # the collected records

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('output.html', 'w')
        fout.write("<html>")
        fout.write("<head>")
        fout.write('<meta charset="utf-8">')
        fout.write("</head>")
        fout.write("<body>")
        fout.write("<table>")
        # Python 2's default encoding is ASCII, so encode the text as UTF-8 explicitly
        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data['url'])
            fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
            fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
            fout.write("</tr>")
        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")
        fout.close()
7-7: Running the crawler and viewing the results: output.html
8. Course Summary
PS: Teaching with diagrams really works well~~