1. Selenium syntax
"""__author__= 雍新有"""
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
# # Open the browser
# # By default Chrome() expects the path to chromedriver.exe; with chromedriver on the PATH no argument is needed
# browser = webdriver.Chrome()
# # get(url): opens the given address
# browser.get('https://www.baidu.com')
# # Close the browser
# browser.close()
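# # A minimal sketch of the same steps in Selenium 4 style (assumption: selenium>=4
# # is installed, where the driver path moves into a Service object):
# from selenium.webdriver.chrome.service import Service
# browser = webdriver.Chrome(service=Service())  # Service() locates chromedriver on the PATH
# browser.get('https://www.baidu.com')
# browser.close()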
# 1. Fetch Douban movie information
# browser = webdriver.Chrome()
# browser.get('https://movie.douban.com/explore#!type=movie&tag=%E7%83%AD%E9%97%A8&sort=recommend&page_limit=20&page_start=0')
# # Grab the 'load more' button and trigger a click
# # Simulate dragging the scrollbar down
# js1 = 'window.scrollTo(0, 1000)'
# # browser.execute_script(js1)
# # Locate the load-more button
# # The button's XPath -- //*[@id="content"]/div/div[1]/div/div[4]/a
# more_button = browser.find_element_by_xpath('//*[@id="content"]/div/div[1]/div/div[4]/a')
# # Trigger the click
# more_button.click()
# # The click has only just fired, so the page is not yet rendered with the Ajax data; sleep a few seconds
# time.sleep(3)
#
# # page_source returns the source after Ajax rendering
# a1 = browser.page_source
# # a1 is the raw source; parse it with regex, lxml.etree, or BeautifulSoup4
# print(a1)
# browser.close()
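# # A minimal parsing sketch for a1 with lxml (the selector below is an assumed
# # illustration, not taken from the actual Douban markup):
# from lxml import etree
# tree = etree.HTML(a1)
# titles = tree.xpath('//div[@class="list-wp"]//a/p/text()')
# print(titles)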
# 2. JD.com - locating elements
# browser = webdriver.Chrome()
# browser.get('https://www.jd.com')
# # The search input box
# input = browser.find_element_by_xpath('//*[@id="key"]')
# print(input)
# input = browser.find_element_by_id('key')
# print(input)
# # CSS selector (DevTools' Copy menu provides it)
# input = browser.find_element_by_css_selector('#key')
#
# # Generic form
# # input = browser.find_element(By.XPATH, '//*[@id="key"]')
# # input = browser.find_element(By.ID, 'key')
# # input = browser.find_element(By.CSS_SELECTOR, '#key')
#
# # Type the content
# input.send_keys('Mac')
# 3. Back and forward
# Open the three sites below one after another in the same window
# browser = webdriver.Chrome()
# browser.get('https://www.baidu.com')
# browser.get('https://www.jd.com')
# browser.get('http://blog.vincent-whf.top')
#
# # Go back
# browser.back()
# # Go forward
# browser.forward()
# 4. Open JD.com, type a query, click search
# Waits (implicit wait vs. explicit wait)
# browser = webdriver.Chrome()
# browser.get('https://www.jd.com')
# # Without a pause the page may not have loaded yet, and the XPath lookup below would fail
# # time.sleep(3)
#
# # Implicit wait: when an element lookup fails, keep retrying for up to the given timeout (default 0, configurable)
# browser.implicitly_wait(10)
#
# input = browser.find_element(By.XPATH, '//*[@id="key"]')
# # Clear the input box
# input.clear()
# # Type the query
# input.send_keys('零食')
# # Grab the search button
# button = browser.find_element(By.XPATH, '//*[@id="search"]/div/div[2]/button')
# # Click search
# button.click()
# browser.implicitly_wait(10)
#
# # Grab the page-number input
# total = browser.find_element(By.XPATH, '//*[@id="J_bottomPage"]/span[2]/input')
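# Note: implicitly_wait() sets a session-wide timeout that applies to every
# subsequent find_element call; calling it a second time (as above) only
# resets the timeout, it does not pause the script by itself.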
# Explicit wait
browser = webdriver.Chrome()
browser.get('https://www.jd.com')
# Build a wait object with a 10-second timeout
wait = WebDriverWait(browser, 10)
# presence_of_element_located: returns the element once it is present in the DOM
input = wait.until(
    EC.presence_of_element_located((By.XPATH, '//*[@id="key"]'))
)
# Clear the input box
input.clear()
# Type the query
input.send_keys('水果')
# element_to_be_clickable: wait until the element can be clicked
button = wait.until(
    EC.element_to_be_clickable((By.XPATH, '//*[@id="search"]/div/div[2]/button'))
)
# Click search
button.click()
# Grab the total page count; a locator must point at an element, not a text
# node, so the trailing /text() is dropped and .text is read instead
total = wait.until(
    EC.presence_of_element_located((By.XPATH, '//*[@id="J_bottomPage"]/span[2]/em[1]'))
)
print(total.text)
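# A few other commonly used expected_conditions, for reference (sketch only):
# wait.until(EC.title_contains('京东'))                                # page title contains text
# wait.until(EC.visibility_of_element_located((By.ID, 'key')))        # present AND visible
# wait.until(EC.text_to_be_present_in_element((By.ID, 'key'), '水果'))  # element text contains value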
2. JD.com Selenium crawler
"""__author__= 雍新有"""
import re
import time
import pymongo
from lxml import etree
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
chrome_options = webdriver.ChromeOptions()
# Configure Chrome not to load images (speeds up page loads)
prefs = {"profile.managed_default_content_settings.images": 2}
chrome_options.add_experimental_option("prefs", prefs)
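# Further options can be stacked the same way before the browser is created,
# e.g. running without a visible window (a sketch, not part of the original script):
# chrome_options.add_argument('--headless')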
# Create the browser (options= is the current keyword; chrome_options= is deprecated)
browser = webdriver.Chrome(options=chrome_options)
# Explicit wait, up to 20 seconds
wait = WebDriverWait(browser, 20)
def search():
    browser.get('https://www.jd.com')
    # Grab the input box and type the query
    input = wait.until(
        EC.presence_of_element_located((By.XPATH, '//*[@id="key"]'))
    )
    input.clear()
    input.send_keys('水果')
    # Grab the search button and click it
    button = wait.until(
        EC.element_to_be_clickable((By.XPATH, '//*[@id="search"]/div/div[2]/button'))
    )
    button.click()
    # Scroll down in 16 steps so the lazy-loaded items render (16 × 5 s, slow but reliable)
    for i in range(1, 17):
        js = f'window.scrollTo(0, {i} * document.body.scrollHeight / 16)'
        browser.execute_script(js)
        time.sleep(5)
    total = wait.until(
        EC.presence_of_element_located((By.XPATH, '//*[@id="J_bottomPage"]/span[2]/em[1]'))
    )
    # Grab the first page's source, parse it, and store it
    html = browser.page_source
    result = parse_html(html)
    save_mongo(result, 1)
    return total.text
def next_page(page):
    # Fetch each page from the second one onward
    # Two ways to page: 1. click the next-page link  2. type a page number and click go
    # next-page XPath: //*[@id="J_bottomPage"]/span[1]/a[9]
    # Scroll down in 32 steps so the lazy-loaded items render
    for i in range(1, 33):
        js = f'window.scrollTo(0, {i} * document.body.scrollHeight / 32)'
        browser.execute_script(js)
        time.sleep(5)
    # Earlier variant: scroll in quarters
    # js = 'window.scrollTo(0, 2*document.body.scrollHeight / 4)'
    # browser.execute_script(js)
    # time.sleep(1)
    #
    # js = 'window.scrollTo(0, 3*document.body.scrollHeight / 4)'
    # browser.execute_script(js)
    # time.sleep(1)
    #
    # js = 'window.scrollTo(0, 4*document.body.scrollHeight / 4)'
    # browser.execute_script(js)
    # time.sleep(1)
    # Option 1: click the next-page link
    # print(f'---------- clicking page {page} ----------')
    # next_input = wait.until(
    #     EC.element_to_be_clickable((By.XPATH, '//*[@id="J_bottomPage"]/span[1]/a[9]'))
    # )
    # next_input.click()
    # Option 2: type the page number and click go
    page_input = wait.until(
        EC.presence_of_element_located((By.XPATH, '//*[@id="J_bottomPage"]/span[2]/input'))
    )
    page_input.clear()
    print(f'--------- page {page} ----')
    page_input.send_keys(str(page))
    button = wait.until(
        EC.element_to_be_clickable((By.XPATH, '//*[@id="J_bottomPage"]/span[2]/a'))
    )
    button.click()
    # The click reloads the results; wait 10 seconds before grabbing the source
    time.sleep(10)
    html = browser.page_source
    return html
# TODO: verify that the target page has actually been reached before parsing
def parse_html(html):
    tree = etree.HTML(html)
    goods_list = tree.xpath('//*[@id="J_goodsList"]/ul/li')
    result = []
    for item in goods_list:
        # item is a single <li> node
        data = {
            'img': item.xpath('./div/div[1]/a/img/@src'),
            'goods_name': item.xpath('./div/div[3]/a/@title'),
            'goods_price': item.xpath('./div/div[2]/strong/i/text()')
        }
        result.append(data)
    return result
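# Note: each xpath() call above returns a list (typically one element), so the
# stored fields are lists. A flattening sketch, using a hypothetical helper that
# is not part of the original:
# def first(values):
#     return values[0] if values else None
# data = {key: first(value) for key, value in data.items()}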
def save_mongo(data, page):
    db = pymongo.MongoClient(host='127.0.0.1', port=27017)
    print(len(data))
    try:
        for item in data:
            print(item)
            db['spider']['jd1'].insert_one(item)
        # Report success only after every document went in
        print(f'page {page} inserted successfully')
    except Exception:
        print(f'page {page} failed to insert')
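# Design note: save_mongo() builds a new MongoClient on every call; reusing one
# module-level client across all pages would be the more idiomatic choice.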
if __name__ == '__main__':
    # search(): open the browser, search JD.com for a product, return the total page count
    total = search()
    # re.search matches anywhere in the string; re.fullmatch would require the whole string to match
    all_page = int(re.search(r'(\d+)', total).group())
    # Demo run only fetches pages 2-3; use range(2, all_page + 1) for every page
    for page in range(2, 4):
        html = next_page(page)
        result = parse_html(html)
        save_mongo(result, page)
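# Note: the browser window is never closed; a try/finally around the main block
# (a sketch, not in the original) would guarantee cleanup:
# try:
#     total = search()
#     ...
# finally:
#     browser.close()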