LXML

解析库-LXML

pip install -i https://pypi.tuna.tsinghua.edu.cn/simple lxml

使用解析规则:XPath

表达式 描述
nodename 选取此节点的所有子节点
/ 从当前节点,选取直接子节点
// 从当前节点,选取所有子孙节点
. 选取当前节点
.. 选取当前节点的父节点
@ 选取属性

构建实例

从文本构建

def load_text():
    """Build an element tree from an HTML string and print the result.

    etree.HTML() auto-repairs the markup (note the unclosed fifth <li>):
    missing tags such as <html>/<body> are added by the parser.
    """
    markup = '''
    <div>
        <ul>
            <li class="item-0"><a href="link1.html">first item</a></li>
            <li class="item-1"><a href="link2.html">second item</a></li>
            <li class="item-inactive"><a href="link3.html">third item</a></li>
            <li class="item-1"><a href="link4.html">fourth item</a></li>
            <li class="item-0"><a href="link5.html">fifth item</a>
        </ul>
    </div>
    '''

    tree = etree.HTML(markup)
    # tostring() returns bytes, so decode before printing.
    serialized = etree.tostring(tree)
    print(serialized.decode("utf-8"))

从文件构建

def load_text2():
    """Parse ./test.html from disk with the tolerant HTML parser and print it."""
    doc = etree.parse("./test.html", etree.HTMLParser())
    # tostring() returns bytes; decode to a str for printing.
    print(etree.tostring(doc).decode("utf-8"))

注意
etree.tostring()返回的是bytes类型,需要调用decode方法将其转换成str类型

经过处理后的html代码,会被自动修复,添加缺少的标签

选中所有节点

要选中某个类型的所有节点,以 // 开头就可以了

 # Select every element node in the document: "//*" matches all of them.
    html = etree.parse("./test.html", etree.HTMLParser())
    results = html.xpath("//*")
    print(results)

# Select all <li> nodes anywhere in the tree.
    results = html.xpath("//li")
    print(results)
    print(results[0])

选中所有子节点

 # Select direct children: "/" steps only to immediate <a> children of <li>.
    html = etree.parse("./test.html", etree.HTMLParser())
    result = html.xpath("//li/a")
    print(result)

选中所有子孙节点

 # Select descendants: "//" reaches <a> nodes at any depth below <li>.
    result = html.xpath("//li//a")
    print(result)

选取父亲节点

def select_parent_nodes():
    """Demonstrate two equivalent ways of navigating to a node's parent."""
    doc = etree.parse("./test.html", etree.HTMLParser())

    # Direct parent of the <a> whose href is 'link4.html', via "..".
    print(doc.xpath("//a[@href='link4.html']/.."))

    # The parent's class attribute, still via "..".
    print(doc.xpath("//a[@href='link4.html']/../@class"))

    # Same query expressed with the parent:: axis.
    print(doc.xpath("//a[@href='link4.html']/parent::*/@class"))

test.html

<div>
    <ul>
        <li class="item-0"><a href="link1.html">first item</a></li>
        <li class="item-1"><a href="link2.html">second item</a></li>
        <li class="item-inactive"><a href="link3.html">third item</a></li>
        <li class="item-1"><a href="link4.html">fourth item</a></li>
        <li class="item-0"><a href="link5.html">fifth item</a>
    </ul>
</div>

按属性选择

def select_node_by_attrs():
    """Select nodes by an exact attribute match: [@class='item-0']."""
    doc = etree.parse("./test.html", etree.HTMLParser())
    matches = doc.xpath("//li[@class='item-0']")
    print(matches)

获取节点文本

def get_text():
    """Contrast /text() (the node's own text) with //text() (own text
    plus the text of every descendant)."""
    doc = etree.parse("./test.html", etree.HTMLParser())
    print(etree.tostring(doc).decode("utf-8"))

    # /text() yields only text nodes sitting directly under the <li>.
    print(doc.xpath("//li[@class='item-0']/text()"))  # ['\r\n\t']

    # //text() also collects text from nested elements such as <a>.
    print(doc.xpath("//li[@class='item-0']//text()"))  # ['first item', 'fifth item', '\r\n\t']

获取属性的值

def get_attr_content():
    """Extract attribute values with @: the href of every li > a."""
    doc = etree.parse("./test.html", etree.HTMLParser())
    hrefs = doc.xpath("//li/a/@href")
    print(hrefs)

多值属性匹配

使用contains函数

def get_multi_attr_content():
    """Match an element whose class attribute holds several values.

    [@class='li'] requires the whole attribute to equal 'li', so it
    misses class="li li-first"; contains(@class, 'li') matches any
    class list that includes 'li'.
    """
    snippet = '''
               <li class="li li-first"><a href="link1.html">first item</a></li>       
    '''
    doc = etree.HTML(snippet)
    print(doc.xpath("//li[@class='li']/a/text()"))  # []
    print(doc.xpath("//li[contains(@class, 'li')]/a/text()"))  # ['first item']

多属性匹配

def get_multi_attr_match():
    """Combine several attribute predicates with the 'and' operator."""
    snippet = '''
               <li class="li li-first" name="item"><a href="link1.html">first item</a></li>       
    '''
    doc = etree.HTML(snippet)
    # Both conditions must hold: class contains 'li' AND name equals 'item'.
    print(doc.xpath("//li[contains(@class, 'li') and @name='item']/a/text()"))  # ['first item']

and 是xpath中的运算符

运算符 描述 实例
or 或 age=19 or age=20
and 与 age>19 and age<21
mod 取余 5 mod 2
| 计算两个节点集(并集) //book | //cd 返回所有book元素和cd元素的节点集
+ 加法 6+4
- 减法 6-4
* 乘法 6*4
div 除法 6 div 3
= 等于 age=19
!= 不等于 age!=19
< 小于 age<19
<= 小于或等于 age<=19
> 大于 age>19
>= 大于或等于 age>=19

按顺序选取节点

def select_node_by_order():
    """Select nodes by position: numeric index, last(), position()."""
    doc = etree.parse("./test.html", etree.HTMLParser())

    # First <li> — XPath positions are 1-based.
    print(doc.xpath("//li[1]/a/text()"))

    # Last <li>.
    print(doc.xpath("//li[last()]/a/text()"))

    # The first two <li> nodes.
    print(doc.xpath("//li[position()<3]/a/text()"))

    # The third <li> from the end.
    print(doc.xpath("//li[last()-2]/a/text()"))
image.png

各种查询

def select_node_by_axies():
    """Tour of XPath axes: ancestor, attribute, child, descendant,
    following and following-sibling."""
    snippet = '''
        <div>
            <ul>
                <li class="item-0"><a href="link1.html"><span>first item</span></a></li>
                <li class="item-1"><a href="link2.html">second item</a></li>
                <li class="item-inactive"><a href="link3.html">third item</a></li>
                <li class="item-1"><a href="link4.html">fourth item</a></li>
                <li class="item-0"><a href="link5.html">fifth item</a>
            </ul>
        </div>
        '''
    doc = etree.HTML(snippet)

    # All ancestors of the first <li>, e.g.
    # [<Element html ...>, <Element body ...>, <Element div ...>, <Element ul ...>]
    print(doc.xpath("//li[1]/ancestor::*"))

    # Only the <div> ancestors.
    print(doc.xpath("//li[1]/ancestor::div"))

    # Every attribute value of the first <li>.
    print(doc.xpath("//li[1]/attribute::*"))

    # Direct children that also satisfy a predicate.
    print(doc.xpath("//li[1]/child::a[@href='link1.html']"))

    # All descendant elements.
    print(doc.xpath("//li[1]/descendant::*"))

    # Only the <span> descendants.
    print(doc.xpath("//li[1]/descendant::span"))

    # Everything after this node in document order
    # (later siblings together with their subtrees).
    print(doc.xpath("//li[1]/following::*"))

    # Just the first of those following nodes.
    print(doc.xpath("//li[1]/following::*[1]"))

    # Only the following siblings themselves.
    print(doc.xpath("//li[1]/following-sibling::*"))
image.png

爬取百度贴吧图片

校花吧
获取网页源码
BaiduTieBa.py

import requests


class BaiduTieBa:
    """Download Tieba search-result pages for a keyword and save them locally.

    Args:
        name: forum keyword to search for (interpolated into the kw= query).
        pn:   number of result pages to fetch; Tieba paginates in steps of 50.
    """

    def __init__(self, name, pn):
        self.name = name
        self.url = 'http://tieba.baidu.com/f?kw={}&ie=utf-8&pn='.format(name)
        # Legacy UA on purpose: the old-IE page markup is simpler to parse
        # (see the note at the end of the article).
        self.headers = {
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)'
        }

        # Page offsets: pn=0, 50, 100, ...
        self.url_list = [self.url + str(n * 50) for n in range(pn)]
        print(self.url_list)

    def get_data(self, url):
        """Fetch one page and return its raw bytes."""
        r = requests.get(url, headers=self.headers)
        return r.content

    def save_data(self, data, num):
        """Write raw page bytes to '<name>_<num>.html' in the working dir."""
        file_name = self.name + "_" + str(num) + ".html"
        with open(file_name, 'wb') as f:
            f.write(data)

    def run(self):
        """Download every page in url_list and save each one.

        enumerate() replaces the original list.index(url) lookup, which
        was O(n) per iteration and would return the wrong page number if
        the same URL ever appeared twice in the list.
        """
        for num, url in enumerate(self.url_list):
            data = self.get_data(url)
            self.save_data(data, num)
import sys

if __name__ == '__main__':
    # Usage: python BaiduTieBa.py <keyword> <pages>
    # Guard against missing arguments instead of dying with IndexError.
    if len(sys.argv) < 3:
        print("Usage: python BaiduTieBa.py <name> <pn>")
        sys.exit(1)
    name = sys.argv[1]
    pn = int(sys.argv[2])
    baidu = BaiduTieBa(name, pn)
    baidu.run()

输入命令行

python BaiduTieBa.py 校花 5
image.png

get_photo.py

from lxml import etree
import requests

class DownloadPhoto:
    """Extract image URLs from saved Tieba HTML pages and download them."""

    def __init__(self):
        pass

    def down_load_img(self, url):
        """Download one image into ./photo/, named after the URL basename."""
        import os

        r = requests.get(url)
        # Everything after the last '/' is the file name.
        index = url.rfind("/")
        file_name = url[index + 1:]
        save_name = './photo/' + file_name
        # Create the target directory on first use; the original code
        # crashed with FileNotFoundError when ./photo did not exist.
        os.makedirs('./photo', exist_ok=True)
        print("下载图片" + save_name)

        with open(save_name, 'wb') as f:
            f.write(r.content)

    def parse_photo_url(self, page):
        """Parse one saved HTML page and download every thumbnail image.

        The full-size image URL sits in the 'bpic' attribute of <img>
        tags inside <a class="...thumbnail..."> elements.
        """
        html = etree.parse(page, etree.HTMLParser())
        nodes = html.xpath("//a[contains(@class,'thumbnail')]/img/@bpic")
        for node in nodes:
            self.down_load_img(node)


if __name__ == '__main__':
    downloader = DownloadPhoto()
    # Pages previously saved to disk by BaiduTieBa.py.
    saved_pages = ["校花_0.html", "校花_1.html", "校花_2.html", "校花_3.html", "校花_4.html"]
    for page in saved_pages:
        downloader.parse_photo_url(page)

运行此方法


image.png

image.png

注意使用老版本的请求头,可以在ie中调整版本获取

©著作权归作者所有,转载或内容合作请联系作者
【社区内容提示】社区部分内容疑似由AI辅助生成,浏览时请结合常识与多方信息审慎甄别。
平台声明:文章内容(如有图片或视频亦包括在内)由作者上传并发布,文章内容仅代表作者本人观点,简书系信息发布平台,仅提供信息存储服务。

相关阅读更多精彩内容

友情链接更多精彩内容