This code was written while teaching myself; if you spot any errors, corrections are welcome.
Learning the basics of XPath syntax
Reference: the official lxml documentation
-
Installing lxml (I used version 4.1.1 with Python 3.5.2)
pip install lxml
# import (Python 3 style shown below)
from lxml import etree
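As a quick sanity check after installing (not part of the original notes), lxml exposes its version as a tuple:
from lxml import etree
print(etree.LXML_VERSION)  # e.g. (4, 1, 1, 0)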
-
Simple creation and traversal
# create the root element
root = etree.Element('root')
# append a child element and give it an attribute
root.append(etree.Element('child', interesting='sss'))
# another way to add a child element
body = etree.SubElement(root, 'body')
body.text = 'TEXT'  # set the text
body.set('class', 'dd')  # set an attribute

# serialize and print the whole tree (decode the bytes for readable output)
print(etree.tostring(root, encoding='UTF-8', pretty_print=True).decode('UTF-8'))
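For reference, the snippet above serializes to something like:
<?xml version='1.0' encoding='UTF-8'?>
<root>
  <child interesting="sss"/>
  <body class="dd">TEXT</body>
</root>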
# create a root, then add children, text, an entity, and a comment
root = etree.Element('root')
etree.SubElement(root, 'child').text = 'Child 1'
etree.SubElement(root, 'child').text = 'Child 2'
etree.SubElement(root, 'child').text = 'Child 3'
root.append(etree.Entity('#234'))  # add an entity reference (&#234;)
root.append(etree.Comment('some comment'))  # add a comment
# append a <br> to the third child; root[2] replaces the deprecated root.getchildren()[2]
br = etree.SubElement(root[2], 'br')
br.tail = 'TAIL'
for element in root.iter():  # to visit only real Elements, use root.iter(tag=etree.Element)
    if isinstance(element.tag, str):  # Entity and Comment tags are functions, not strings
        print('%s - %s' % (element.tag, element.text))
    else:
        print('SPECIAL: %s - %s' % (element, element.text))
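The tail used above trips people up: in lxml, text that follows an element's closing tag is stored on that element's tail, not on the parent's text. A minimal sketch:
frag = etree.fromstring('<p>Hello <b>world</b> again</p>')
b = frag[0]
print(frag.text)  # 'Hello '
print(b.text)     # 'world'
print(b.tail)     # ' again'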
-
Parsing HTML/XML
# import the relevant modules first
from lxml import etree
from io import StringIO, BytesIO
# the HTML parser repairs broken markup and closes unclosed tags
broken_html = '<html><head><title>test<body><h1 class="hh">page title</h3>'
parser = etree.HTMLParser()
tree = etree.parse(StringIO(broken_html), parser)  # or simply: html = etree.HTML(broken_html)
print(etree.tostring(tree, pretty_print=True, method='html').decode('UTF-8'))
# use XPath to get the h1 elements
h1 = tree.xpath('//h1')  # xpath() returns a list
# tag of the first match
print(h1[0].tag)
# its class attribute
print(h1[0].get('class'))
# its text content
print(h1[0].text)
# lists of all attribute keys and values
print(h1[0].keys(), h1[0].values())
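Note that text() and @attr queries return plain strings rather than elements, and lxml's "smart strings" can even report which element they came from. A short sketch against the tree parsed above:
titles = tree.xpath('//h1/text()')   # ['page title']
classes = tree.xpath('//h1/@class')  # ['hh']
print(titles[0].getparent().tag)     # 'h1' -- smart strings remember their origin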
-
A small example of using XPath
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from lxml import etree
class LearnXpath(object):
    """XPath practice with the lxml module in Python."""

    def __init__(self, fileName='./example.html'):
        self.htmlParser = etree.HTMLParser(remove_comments=True, compact=True, encoding='UTF-8')
        self.root = etree.parse(fileName, self.htmlParser)

    # print the HTML content
    def __str__(self):
        return str(etree.tostring(self.root, pretty_print=True, method='html'), encoding='UTF-8')
    __repr__ = __str__
    # list the avatar images' alt and src, deduplicating by src
    def listAuthorImg(self, printFlag=True):
        imgList = self.root.xpath('//div[@id="list-container"]//div[@class="author"]//img')
        s = set()
        if printFlag:
            for img in imgList:
                if img.get('src') in s:
                    continue
                else:
                    s.add(img.get('src'))
                    print('alt=%s, src=%s' % (img.get('alt'), 'http:' + img.get('src')))
        else:
            return imgList
    # find all images whose numeric alt value is greater than 100, deduplicated
    def listImgAltBig100(self, printFlag=True):
        imgList = self.root.xpath('//div[@id="list-container"]//li//img[number(@alt)>100]')
        s = set()
        if printFlag:
            for img in imgList:
                if img.get('src') in s:
                    continue
                else:
                    s.add(img.get('src'))
                    print('alt=%s, src=%s' % (img.get('alt'), 'http:' + img.get('src')))
    # get each article's title and link
    def listArticle(self, printFlag=True):
        articleList = self.root.xpath('//div[@id="list-container"]//li//a[@class="title"]')
        if printFlag:
            for article in articleList:
                print('title=%s, src=%s' % ('《' + article.text + '》', 'http://www.jianshu.com' + article.get('href')))
        else:
            return articleList
    # get the list of article titles; the HTMLParser needs encoding='UTF-8', otherwise the text comes back garbled
    def listArticleTitle(self, printFlag=True):
        titleList = self.root.xpath('//div[@id="list-container"]//li//a[@class="title"]/text()')
        if printFlag:
            for title in titleList:
                print('《' + title + '》')
        else:
            return titleList
    # get all article links
    def listArticleHref(self, printFlag=True):
        hrefList = self.root.xpath('//div[@id="list-container"]//li//a[@class="title"]/@href')
        if printFlag:
            for href in hrefList:
                print('http://www.jianshu.com' + href)
        else:
            return hrefList
    # get the article ids
    def listArticleId(self, printFlag=True):
        idList = self.root.xpath('//div[@id="list-container"]/ul/li/@data-note-id')
        if printFlag:
            for id in idList:
                print(id)
    # get the third article's title and link
    def getThirdArticle(self, printFlag=True):
        # last article: //div[@id="list-container"]/ul/li[last()]//a[@class="title"]
        # second to last: //div[@id="list-container"]/ul/li[last()-1]//a[@class="title"]
        thirdArticle = self.root.xpath('//div[@id="list-container"]/ul/li[3]//a[@class="title"]')
        if printFlag:
            print('title=%s, src=%s' % ('《' + thirdArticle[0].text + '》', 'http://www.jianshu.com' + thirdArticle[0].get('href')))
        else:
            return thirdArticle
    # get the first three articles' titles and links
    def listthreeArticle(self, printFlag=True):
        articleList = self.root.xpath('//div[@id="list-container"]/ul/li[position()<4]//a[@class="title"]')
        if printFlag:
            for article in articleList:
                print('title=%s, src=%s' % ('《' + article.text + '》', 'http://www.jianshu.com' + article.get('href')))
        else:
            return articleList
    # get the title and link of articles whose view count is greater than n
    def listArticleByFilter(self, printFlag=True, n=1000):
        scanList = self.root.xpath('//div[@id="list-container"]//li//div[@class="meta"]/a[1]')  # all <a> tags that hold the view count
        if printFlag:
            for sc in scanList:
                textList = sc.xpath('./text()')  # the <a> text nodes, usually of the form ['\n', ' 181\n ']
                for txt in textList:
                    try:
                        txt = txt[:txt.index('\n')].strip()  # cut at the first '\n' and strip surrounding spaces
                        scanCount = int(txt)  # convert to int
                        if scanCount > n:
                            # climb to the parent <div class="meta">, then step to its preceding sibling a[class="title"]
                            titleA = sc.xpath('./ancestor::div[@class="meta"]/preceding-sibling::a[@class="title"]')
                            print('scanCount=%d, title=%s, src=%s' % (scanCount, '《' + titleA[0].text + '》', 'http://www.jianshu.com' + titleA[0].get('href')))
                    except (ValueError, IndexError):  # non-numeric text (or a missing title); skip it
                        continue
        else:
            return 0
if __name__ == '__main__':
    lx = LearnXpath()
    # print(lx)
    # avatar images' alt and src (pass True to actually print)
    lx.listAuthorImg(False)
    # images whose alt value is greater than 100
    lx.listImgAltBig100(False)
    # every article's title and link
    lx.listArticle(False)
    # every article's title
    lx.listArticleTitle(False)
    # every article's link
    lx.listArticleHref(False)
    # every article's id
    lx.listArticleId(False)
    # the third article's title and link
    lx.getThirdArticle(False)
    # the first three articles' titles and links
    lx.listthreeArticle(False)
    # articles with more than 1000 views
    lx.listArticleByFilter(printFlag=False)
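The ancestor/preceding-sibling navigation in listArticleByFilter is the least obvious XPath in the script. Here is the same movement on a minimal, made-up fragment of the list markup:
li = etree.HTML('<li><a class="title" href="/p/abc">A post</a>'
                '<div class="meta"><a> 181\n </a></div></li>')
count_a = li.xpath('//div[@class="meta"]/a')[0]
# climb from the view-count <a> up to its <div class="meta"> ancestor,
# then step back to the sibling <a class="title">
title = count_a.xpath('./ancestor::div[@class="meta"]/preceding-sibling::a[@class="title"]')
print(title[0].text, title[0].get('href'))  # A post /p/abc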