Fetch and save information for all stocks
The draft is more or less done, but the code is obviously of low value: there is no optimization at all, the goal was just to get it written quickly, so please go easy on it.
The next step is to optimize on top of this code. The first thing is probably to clean up the code "smells", and after that to add multithreading (a rough sketch of a threaded loop is at the end of this post). I'm also half tempted to rewrite it in Go, though I'm not sure whether Go has a Selenium package.
#coding=utf-8
from selenium import webdriver
import time
import os
import re
import sys
from selenium.common.exceptions import NoSuchElementException
# Python 2 hack so the Chinese strings can be written to file without explicit encoding
reload(sys)
sys.setdefaultencoding('utf8')
# Debug helper: print every link that was found and how many there are
def showlink(linklist):
    for x in linklist:
        print x.get_attribute('href')
    print "Number of stocks: %d" % (len(linklist))
# Fetch one stock page with the secondary browser, pull out the name, the code
# and the figures in the 'pad5' core-data table, and append one "~"-separated
# line to the currently open output file.
def getinfo(mydriver, gourl):
    linetext = ""
    mydriver.get(gourl)
    try:
        gupiaoming = mydriver.find_element_by_xpath(title).text
        gupiaocode = mydriver.find_element_by_xpath(code).text
        hexinshuju = mydriver.find_element_by_class_name('pad5')
        shujuhang = hexinshuju.find_elements_by_tag_name('tr')
        # For all but the last two rows, take the first cell, split it on the
        # colon and keep the value part
        for i in range(len(shujuhang) - 2):
            shujulie = shujuhang[i].find_elements_by_tag_name('td')
            tmpshuju = myre.split(shujulie[0].text)
            linetext = linetext + "~" + tmpshuju[1]
        # Rows 8 and 9 are split on the whole row text instead
        shuju = myre.split(shujuhang[8].text)
        linetext = linetext + "~" + shuju[1]
        tmpshuju = myre.split(shujuhang[9].text)
        linetext = linetext + "~" + tmpshuju[1]
        linetext = "%s~%s%s" % (gupiaoming, gupiaocode, linetext)
        print "Writing:", linetext
        myfile.write("%s\n" % (linetext))
    except NoSuchElementException, e:
        # Index links that are not individual stock pages lack these elements
        print "Not a stock page"
######################
br = webdriver.Firefox()      # main browser: holds the stock index page
brsub = webdriver.Firefox()   # secondary browser: loads each individual stock page
baseurl = "http://quote.eastmoney.com/"
indexurl = "stocklist.html"
gourl = "%s%s" % (baseurl, indexurl)
br.get(gourl)
# The Shanghai and Shenzhen listings are two separate <ul> blocks on the index page
shxpath = "/html/body/div[9]/div[2]/div/ul[1]"
szxpath = "/html/body/div[9]/div[2]/div/ul[2]"
shgupiao = br.find_element_by_xpath(shxpath)
szgupiao = br.find_element_by_xpath(szxpath)
shgupiaolist = shgupiao.find_elements_by_tag_name('a')
szgupiaolist = szgupiao.find_elements_by_tag_name('a')
# Locators used inside getinfo() for the stock name and code on a detail page
title = '//*[@id="name"]'
code = '//*[@id="code"]'
hexinshujuxpath = "/html/body/div[14]/div[1]/div[4]/div[1]"   # core-data block (currently unused)
# Regex that splits a "label:value" cell on the colon
restr = ":".decode('utf8')
myre = re.compile(restr, re.I | re.M | re.S)
#mylist=['http://quote.eastmoney.com/sh603678.html','http://quote.eastmoney.com/sh603686.html']
print "#### Fetching Shanghai stock data ############"
print "#### Saving to shgupiaodata.txt ############"
filename = 'shgupiaodata.txt'
myfile = open(filename, 'w')
for i in shgupiaolist:
    myurl = i.get_attribute('href')
    print myurl
    if myurl is not None:
        getinfo(brsub, myurl)
myfile.close()
print "#### 获得深圳股票数据 ############"
print "#### 数据保存到szgupiaodata.text ############"
filename='szgupiaodata.txt'
myfile = open(filename, 'w')
for i in szgupiaolist:
myurl=i.get_attribute('href')
print myurl
if myurl!=None:
getinfo(brsub,myurl)
myfile.close()
print "数据下载完成"
br.quit()
brsub.quit()
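
About the multithreading mentioned at the top: the per-stock loop could be split across a few worker threads, each with its own Firefox instance, since a WebDriver object should not be shared between threads. The sketch below is only an outline under my own assumptions: fetch_one() stands for a getinfo() refactored to return the line instead of writing it, and WORKER_COUNT is an arbitrary pool size; neither exists in the draft above.

###### threaded sketch, not part of the draft above ######
#coding=utf-8
import threading
import Queue
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException

WORKER_COUNT = 4                  # assumed pool size; one Firefox window per worker
url_queue = Queue.Queue()
write_lock = threading.Lock()

def worker(outfile):
    mydriver = webdriver.Firefox()          # one browser per thread, never shared
    while True:
        try:
            myurl = url_queue.get_nowait()
        except Queue.Empty:
            break
        try:
            # fetch_one() is hypothetical: getinfo() reworked to return the line
            line = fetch_one(mydriver, myurl)
            if line is not None:
                with write_lock:            # serialize writes to the shared file
                    outfile.write("%s\n" % line)
        except NoSuchElementException:
            pass                            # not a stock page, skip it
        finally:
            url_queue.task_done()
    mydriver.quit()

def crawl(urls, filename):
    for u in urls:
        if u is not None:
            url_queue.put(u)
    outfile = open(filename, 'w')
    threads = [threading.Thread(target=worker, args=(outfile,)) for _ in range(WORKER_COUNT)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    outfile.close()

Calling it would look roughly like crawl([a.get_attribute('href') for a in shgupiaolist], 'shgupiaodata.txt'), and the same again for the Shenzhen list; whether four Firefox windows are actually faster than one on this machine is something to measure, not assume.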