# --------------------------------------
# Scrape the basic definitions (基本解释) from zdic.net and save them,
# together with the URLs of each character's other dictionary pages
# --------------------------------------
import urllib.request
import re
from bs4 import BeautifulSoup
import codecs
# Fetch one definition page by URL and scrape it
def scrapt(zurl):
    print(zurl)
    req = urllib.request.Request(zurl)
    req.add_header('Referer', 'http://www.zdic.net/z/jbs/')
    req.add_header('User-Agent', 'None')
    responseb = urllib.request.urlopen(req)
    index_z = responseb.read()
    # Decode the response to get the definition and the related dictionaries
    index_z = index_z.decode('utf8')
    # (this encoding issue was really frustrating)
    # Parse the page for the definition text and the other dictionaries' URLs
    soup = BeautifulSoup(index_z, 'html.parser')
    tab_raw = soup.find_all(attrs={'class': 'tab-row'})
    # Collect the URLs of the other dictionaries' entries for this character
    for itemtab in tab_raw:
        zurllist = re.findall(r'/./.*?htm', str(itemtab))
        for line in zurllist:
            zurllistfile.write('http://www.zdic.net' + line + '\n')
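    # Illustrative example (assumption, not from the source): a matched href
    # fragment such as '/z/15/js/4E00.htm' is written out as
    # 'http://www.zdic.net/z/15/js/4E00.htm'.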
    # (note to self: the argument must not be an iterator)
    tab_page = soup.find_all(attrs={'class': 'tab-page'})
    # Extract the definition content; the character's hex codepoint
    # is the second-to-last path component of the URL
    keyq = re.split(r'[/.]', zurl)[-2]
    print(keyq)
    if len(keyq) > 4:
        # a '\u' escape takes exactly 4 hex digits, so drop the leading one
        keyq = keyq[1:]
        print(keyq)
    key = (b'\\u' + keyq.encode()).decode('unicode-escape')
    print(key)
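    # Worked example of the decode above (illustrative assumption):
    #   keyq = '4e00'  ->  (b'\\u' + b'4e00').decode('unicode-escape')  ->  '一'
    # Note that codepoints above U+FFFF would need a '\\U' escape with 8 hex
    # digits; dropping the leading digit only approximates those characters.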
    # Tag each definition block with its character and save it
    for tab_page_item in tab_page:
        tab_page_item['key'] = key
        hdjbjs.write(str(tab_page_item) + '\n')
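    # Each saved line is one serialized element, e.g. (illustrative assumption):
    #   <div class="tab-page" key="一"> ... definition HTML ... </div>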
# Open the output file for the definition data
hdjbjs = codecs.open("hdjibenjieshi_file", "w", 'utf-8')
hdjbjs.write("<xml name='汉典基本解释'>")
# Open the output file for the other dictionaries' URLs
zurllistfile = codecs.open("otherzurllist", "w", 'utf-8')
# Open the file of definition-page URLs and read them in
jsurlfile = open('zdurlfile_jibenjieshizdurl', 'r')
a = jsurlfile.read()
b = a.split('\n')
jsurlfile.close()
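# Expected input format (assumption): one definition-page URL per line, e.g.
#   http://www.zdic.net/z/15/js/4E00.htm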
for zurl in b:
    if len(zurl) != 0:
        scrapt(zurl)
# Close the output files before exiting
zurllistfile.close()
hdjbjs.write("</xml>")
hdjbjs.close()
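# Rough usage sketch (assumption): run this script directly after producing
# 'zdurlfile_jibenjieshizdurl'; it writes 'hdjibenjieshi_file' (the definition
# XML) and 'otherzurllist' (URLs for the other dictionary tabs).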