My company has recently been building a data-crawling system. Because the accuracy requirements are high, every scraped value has to be checked against its source page (the data updates at irregular intervals, and there are several thousand records), so most of my testing time was spent verifying data on the source sites, which was far too slow. I pulled together some material from the web, adapted it to my own project, and wrote a script that crawls the data to be verified ahead of time and writes it into a CSV spreadsheet; during testing I now just check against the spreadsheet, which has sped things up considerably. The code is below:
from bs4 import BeautifulSoup as BS
from selenium import webdriver
import time
import csv

# Write the CSV header row once, overwriting any results from a previous run
with open('results.csv', 'w', newline='') as f:
    f_csv = csv.writer(f)
    f_csv.writerow(['指标', '名称', '价格', '日期'])  # Indicator, Name, Price, Date

# Append one row of scraped values to the CSV
def csv_write(tablelist):
    with open('results.csv', 'a', newline='') as f:
        f_csv = csv.writer(f)
        f_csv.writerow(tablelist)
# Pull the fields for one indicator out of the page
class B():
    def get_tablelist(self, html, e1, e2, e3, z):
        soup = BS(html, 'lxml')
        namelist = soup.select(e1)      # indicator name
        priceRMBlist = soup.select(e2)  # price in RMB
        #priceDOLLARlist = soup.select('ul.zhishu1 > li:nth-of-type(10)')  # USD price, not needed yet
        daylist = soup.select(e3)       # date of the quote
        tablelist = []
        for a, b, d in zip(namelist, priceRMBlist, daylist):
            name = a.get_text()
            rmb = b.get_text()
            #dollar = c.get_text()
            date = d.get_text()
            tablelist.extend([z, name, rmb, date])
        return tablelist
def mainfun():
    driver = webdriver.Firefox()
    url = 'http://www.sxcoal.com/'
    driver.get(url)
    driver.execute_script("window.scrollBy(0,700)")  # scroll down so the indicator block is in view
    time.sleep(5)  # crude fixed wait for the page to finish rendering
    html = driver.page_source
    for i in range(12):
        if i == 0:
            e1 = 'ul.zhishu1 > li:nth-of-type(19)'
            e2 = 'ul.zhishu1 > li:nth-of-type(20)'
            e3 = 'div.jytitle > span:nth-of-type(1)'
            z = "CA_0000305770"
        elif i == 1:
            e1 = 'ul.zhishu1 > li:nth-of-type(19)'
            e2 = 'ul.zhishu1 > li:nth-of-type(22)'
            e3 = 'div.jytitle > span:nth-of-type(1)'
            z = "CA_0000305771"
        elif i == 2:
            e1 = 'ul.zhishu1 > li:nth-of-type(55)'
            e2 = 'ul.zhishu1 > li:nth-of-type(56)'
            e3 = 'div.jytitle > span:nth-of-type(1)'
            z = "CA_0000606480"
        elif i == 3:
            e1 = 'ul.zhishu1 > li:nth-of-type(61)'
            e2 = 'ul.zhishu1 > li:nth-of-type(62)'
            e3 = 'div.jytitle > span:nth-of-type(1)'
            z = "CA_0000606481"
        elif i == 4:
            e1 = 'ul.zhishu1 > li:nth-of-type(79)'
            e2 = 'ul.zhishu1 > li:nth-of-type(80)'
            e3 = 'div.jytitle > span:nth-of-type(1)'
            z = "CA_0000606483"
        elif i == 5:
            e1 = 'ul.zhishu1 > li:nth-of-type(97)'
            e2 = 'ul.zhishu1 > li:nth-of-type(98)'
            e3 = 'div.jytitle > span:nth-of-type(1)'
            z = "CA_0000606484"
        elif i == 6:
            e1 = 'ul.zhishu1 > li:nth-of-type(7)'
            e2 = 'ul.zhishu1 > li:nth-of-type(8)'
            e3 = 'div.jytitle > span:nth-of-type(1)'
            z = "CA_0000305768"
        elif i == 7:
            e1 = 'ul.zhishu1 > li:nth-of-type(7)'
            e2 = 'ul.zhishu1 > li:nth-of-type(10)'
            e3 = 'div.jytitle > span:nth-of-type(1)'
            z = "CA_0000305769"
        elif i == 8:
            e1 = 'ul.zhishu1 > li:nth-of-type(13)'
            e2 = 'ul.zhishu1 > li:nth-of-type(14)'
            e3 = 'div.jytitle > span:nth-of-type(1)'
            z = "CA_0000404858"
        elif i == 9:
            e1 = 'ul.zhishu1 > li:nth-of-type(13)'
            e2 = 'ul.zhishu1 > li:nth-of-type(16)'
            e3 = 'div.jytitle > span:nth-of-type(1)'
            z = "CA_0000404859"
        elif i == 10:
            e1 = 'ul.zhishu1 > li:nth-of-type(25)'
            e2 = 'ul.zhishu1 > li:nth-of-type(26)'
            e3 = 'div.jytitle > span:nth-of-type(1)'
            z = "CA_0000847115"
        elif i == 11:
            e1 = 'ul.zhishu1 > li:nth-of-type(25)'
            e2 = 'ul.zhishu1 > li:nth-of-type(28)'
            e3 = 'div.jytitle > span:nth-of-type(1)'
            z = "CA_0000847116"
        tablelist = B().get_tablelist(html, e1, e2, e3, z)
        csv_write(tablelist)
    driver.quit()

if __name__ == '__main__':
    mainfun()
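One fragile spot worth flagging: the fixed time.sleep(5) only works as long as the page always finishes rendering within five seconds. Selenium's explicit waits block until an element is actually present instead. Here is a minimal sketch of swapping one in, assuming the indicator list lives under ul.zhishu1 once the data has loaded (wait_for_indexes is just an illustrative helper name):

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_for_indexes(driver, timeout=15):
    # Block until at least one indicator <li> is present, up to `timeout` seconds,
    # rather than always sleeping a fixed five seconds
    WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, 'ul.zhishu1 > li'))
    )

In mainfun(), the time.sleep(5) line would then become wait_for_indexes(driver).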
After the run, results.csv holds one row per indicator, ready to be checked against during testing.

I have to admit I was a little excited once it all worked, heh.

The only part that still feels clumsy is how each new data item has to be wired into the if/elif chain by hand; optimizing the way data items are added, along with the follow-up crawling, is still on my to-do list (a rough idea is sketched below)...
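The direction I'm leaning toward is replacing the if/elif chain with a plain table of (indicator ID, name selector, price selector) tuples, so that adding a data item becomes a one-line change. A rough sketch reusing the exact selectors from above; INDEXES and DATE_SELECTOR are just names made up for this sketch, and since the date selector is identical for every indicator it can be hoisted out:

# Each entry: (indicator ID, name selector, price selector)
INDEXES = [
    ("CA_0000305770", 'ul.zhishu1 > li:nth-of-type(19)', 'ul.zhishu1 > li:nth-of-type(20)'),
    ("CA_0000305771", 'ul.zhishu1 > li:nth-of-type(19)', 'ul.zhishu1 > li:nth-of-type(22)'),
    ("CA_0000606480", 'ul.zhishu1 > li:nth-of-type(55)', 'ul.zhishu1 > li:nth-of-type(56)'),
    ("CA_0000606481", 'ul.zhishu1 > li:nth-of-type(61)', 'ul.zhishu1 > li:nth-of-type(62)'),
    ("CA_0000606483", 'ul.zhishu1 > li:nth-of-type(79)', 'ul.zhishu1 > li:nth-of-type(80)'),
    ("CA_0000606484", 'ul.zhishu1 > li:nth-of-type(97)', 'ul.zhishu1 > li:nth-of-type(98)'),
    ("CA_0000305768", 'ul.zhishu1 > li:nth-of-type(7)',  'ul.zhishu1 > li:nth-of-type(8)'),
    ("CA_0000305769", 'ul.zhishu1 > li:nth-of-type(7)',  'ul.zhishu1 > li:nth-of-type(10)'),
    ("CA_0000404858", 'ul.zhishu1 > li:nth-of-type(13)', 'ul.zhishu1 > li:nth-of-type(14)'),
    ("CA_0000404859", 'ul.zhishu1 > li:nth-of-type(13)', 'ul.zhishu1 > li:nth-of-type(16)'),
    ("CA_0000847115", 'ul.zhishu1 > li:nth-of-type(25)', 'ul.zhishu1 > li:nth-of-type(26)'),
    ("CA_0000847116", 'ul.zhishu1 > li:nth-of-type(25)', 'ul.zhishu1 > li:nth-of-type(28)'),
]
DATE_SELECTOR = 'div.jytitle > span:nth-of-type(1)'  # same for every indicator

def mainfun():
    driver = webdriver.Firefox()
    driver.get('http://www.sxcoal.com/')
    driver.execute_script("window.scrollBy(0,700)")
    time.sleep(5)
    html = driver.page_source
    # Adding a new data item is now just one more tuple in INDEXES
    for z, e1, e2 in INDEXES:
        csv_write(B().get_tablelist(html, e1, e2, DATE_SELECTOR, z))
    driver.quit()

With this layout the loop body never changes; a new indicator only touches the INDEXES table.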