Web Scraping with Selenium

Basic Usage

from selenium import webdriver

from selenium.webdriver.common.by import By

from selenium.webdriver.common.keys import Keys

from selenium.webdriver.support import expected_conditions as EC

from selenium.webdriver.support.wait import WebDriverWait

browser = webdriver.Chrome()

try:

    browser.get('https://www.baidu.com')

    input = browser.find_element_by_id('kw')

    input.send_keys('Python')

    input.send_keys(Keys.ENTER)

    wait = WebDriverWait(browser, 10)

    wait.until(EC.presence_of_element_located((By.ID, 'content_left')))

    print(browser.current_url)

    print(browser.get_cookies())

    print(browser.page_source)

finally:

    browser.close()

Declaring a Browser Object

from selenium import webdriver

browser = webdriver.Chrome()

browser = webdriver.Firefox()

browser = webdriver.Edge()

browser = webdriver.PhantomJS()

browser = webdriver.Safari()
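A side note: PhantomJS has since been deprecated in Selenium; to drive a browser without a visible window, headless Chrome is the usual replacement. A minimal sketch, assuming a reasonably recent Chrome and chromedriver (older Selenium versions take chrome_options instead of options):

from selenium import webdriver

options = webdriver.ChromeOptions()
options.add_argument('--headless')  # run Chrome without opening a visible window
browser = webdriver.Chrome(options=options)  # older Selenium releases: chrome_options=options
browser.get('https://www.baidu.com')
print(browser.title)
browser.close()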

Visiting a Page

from selenium import webdriver

browser = webdriver.Chrome()

browser.get('https://www.taobao.com')

print(browser.page_source)

browser.close()

Finding Elements

Single Element

from selenium import webdriver

browser = webdriver.Chrome()

browser.get('https://www.taobao.com')

input_first = browser.find_element_by_id('q')

input_second = browser.find_element_by_css_selector('#q')

input_third = browser.find_element_by_xpath('//*[@id="q"]')

print(input_first, input_second, input_third)

browser.close()

find_element_by_name

find_element_by_xpath

find_element_by_link_text

find_element_by_partial_link_text

find_element_by_tag_name

find_element_by_class_name

find_element_by_css_selector
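All of these shortcuts wrap the generic find_element method, which takes a By strategy and a value. A minimal sketch using the same Taobao search box:

from selenium import webdriver
from selenium.webdriver.common.by import By

browser = webdriver.Chrome()
browser.get('https://www.taobao.com')
# The generic form is equivalent to the shortcut methods listed above
input_first = browser.find_element(By.ID, 'q')
input_second = browser.find_element(By.CSS_SELECTOR, '#q')
print(input_first, input_second)
browser.close()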

Multiple Elements

from selenium import webdriver

browser = webdriver.Chrome()

browser.get('https://www.taobao.com')

lis = browser.find_elements_by_css_selector('.service-bd li')

print(lis)

browser.close()

from selenium import webdriver

from selenium.webdriver.common.by import By

browser = webdriver.Chrome()

browser.get('https://www.taobao.com')

lis = browser.find_elements(By.CSS_SELECTOR, '.service-bd li')

print(lis)

browser.close()

* find_elements_by_name

* find_elements_by_xpath

* find_elements_by_link_text

* find_elements_by_partial_link_text

* find_elements_by_tag_name

* find_elements_by_class_name

* find_elements_by_css_selector

Interacting with Elements

Call interaction methods on the elements you have located:

from selenium import webdriver

import time

browser = webdriver.Chrome()

browser.get('https://www.taobao.com')

input = browser.find_element_by_id('q')

input.send_keys('iPhone')

time.sleep(1)

input.clear()

input.send_keys('iPad')

button = browser.find_element_by_class_name('btn-search')

button.click()

More operations: http://selenium-python.readthedocs.io/api.html#module-selenium.webdriver.remote.webelement

Action Chains

Attach actions to an ActionChains object and execute them sequentially:

from selenium import webdriver

from selenium.webdriver import ActionChains

browser = webdriver.Chrome()

url = 'http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'

browser.get(url)

browser.switch_to.frame('iframeResult')

source = browser.find_element_by_css_selector('#draggable')

target = browser.find_element_by_css_selector('#droppable')

actions = ActionChains(browser)

actions.drag_and_drop(source, target)

actions.perform()

More operations: http://selenium-python.readthedocs.io/api.html#module-selenium.webdriver.common.action_chains
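ActionChains methods can also be chained fluently and performed in one go, since each call returns the chain itself. A rough sketch that drags the same element by a fixed offset (the offset values are arbitrary):

from selenium import webdriver
from selenium.webdriver import ActionChains

browser = webdriver.Chrome()
browser.get('http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
browser.switch_to.frame('iframeResult')
source = browser.find_element_by_css_selector('#draggable')
# click_and_hold, move_by_offset and release each return the chain, so steps read fluently
ActionChains(browser).click_and_hold(source).move_by_offset(100, 50).release().perform()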

Executing JavaScript

from selenium import webdriver

browser = webdriver.Chrome()

browser.get('https://www.zhihu.com/explore')

browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')

browser.execute_script('alert("To Bottom")')
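execute_script can also return values from the page's JavaScript context back to Python; a minimal sketch (the exact values naturally depend on the page):

from selenium import webdriver

browser = webdriver.Chrome()
browser.get('https://www.zhihu.com/explore')
# A 'return' statement in the script hands the value back to Python
title = browser.execute_script('return document.title')
height = browser.execute_script('return document.body.scrollHeight')
print(title, height)
browser.close()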

Getting Element Information

Getting Attributes

from selenium import webdriver

from selenium.webdriver import ActionChains

browser = webdriver.Chrome()

url = 'https://www.zhihu.com/explore'

browser.get(url)

logo = browser.find_element_by_id('zh-top-link-logo')

print(logo)

print(logo.get_attribute('class'))

Getting Text

from selenium import webdriver

browser = webdriver.Chrome()

url = 'https://www.zhihu.com/explore'

browser.get(url)

input = browser.find_element_by_class_name('zu-top-add-question')

print(input.text)

Getting the ID, Location, Tag Name, and Size

from selenium import webdriver

browser = webdriver.Chrome()

url = 'https://www.zhihu.com/explore'

browser.get(url)

input = browser.find_element_by_class_name('zu-top-add-question')

print(input.id)

print(input.location)

print(input.tag_name)

print(input.size)

Frame

import time

from selenium import webdriver

from selenium.common.exceptions import NoSuchElementException

browser = webdriver.Chrome()

url = 'http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'

browser.get(url)

browser.switch_to.frame('iframeResult')

source = browser.find_element_by_css_selector('#draggable')

print(source)

try:

    logo = browser.find_element_by_class_name('logo')

except NoSuchElementException:

    print('NO LOGO')

browser.switch_to.parent_frame()

logo = browser.find_element_by_class_name('logo')

print(logo)

print(logo.text)

Waits

Implicit Waits

When an implicit wait is in effect, if WebDriver does not find an element in the DOM it keeps waiting, and only raises a no-such-element exception once the configured timeout has elapsed. In other words, when an element (or elements) is not immediately present, the implicit wait keeps re-querying the DOM for a while before giving up; the default timeout is 0.

from selenium import webdriver

browser = webdriver.Chrome()

browser.implicitly_wait(10)

browser.get('https://www.zhihu.com/explore')

input = browser.find_element_by_class_name('zu-top-add-question')

print(input)

Explicit Waits

from selenium import webdriver

from selenium.webdriver.common.by import By

from selenium.webdriver.support.ui import WebDriverWait

from selenium.webdriver.support import expected_conditions as EC

browser = webdriver.Chrome()

browser.get('https://www.taobao.com/')

wait = WebDriverWait(browser, 10)

input = wait.until(EC.presence_of_element_located((By.ID, 'q')))

button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '.btn-search')))

print(input, button)

*  title_is: the title is exactly the given string

*  title_contains: the title contains the given string

*  presence_of_element_located: the element is present in the DOM; takes a locator tuple, e.g. (By.ID, 'p')

*  visibility_of_element_located: the element is visible; takes a locator tuple

*  visibility_of: the given element is visible; takes an element object

*  presence_of_all_elements_located: all matching elements are present

*  text_to_be_present_in_element: the element's text contains the given string

*  text_to_be_present_in_element_value: the element's value attribute contains the given string

*  frame_to_be_available_and_switch_to_it: the frame is available, and the driver switches to it

*  invisibility_of_element_located: the element is not visible

*  element_to_be_clickable: the element is clickable

*  staleness_of: the element is no longer attached to the DOM; useful for checking whether the page has refreshed

*  element_to_be_selected: the element is selected; takes an element object

*  element_located_to_be_selected: the element is selected; takes a locator tuple

*  element_selection_state_to_be: takes an element object and a state; True if they match, False otherwise

*  element_located_selection_state_to_be: takes a locator tuple and a state; True if they match, False otherwise

*  alert_is_present: an alert is present
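A minimal sketch combining a few of these conditions on the same Taobao page (the expected title fragment '淘宝' is an assumption about that page):

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

browser = webdriver.Chrome()
browser.get('https://www.taobao.com/')
wait = WebDriverWait(browser, 10)
# title_contains takes a plain string; until() returns True once the condition holds
print(wait.until(EC.title_contains('淘宝')))
# visibility_of_element_located takes a locator tuple, like presence_of_element_located
input = wait.until(EC.visibility_of_element_located((By.ID, 'q')))
print(input)
browser.close()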

Full details: http://selenium-python.readthedocs.io/api.html#module-selenium.webdriver.support.expected_conditions

Back and Forward

import time

from selenium import webdriver

browser = webdriver.Chrome()

browser.get('https://www.baidu.com/')

browser.get('https://www.taobao.com/')

browser.get('https://www.python.org/')

browser.back()

time.sleep(1)

browser.forward()

browser.close()

Cookies

from selenium import webdriver

browser = webdriver.Chrome()

browser.get('https://www.zhihu.com/explore')

print(browser.get_cookies())

browser.add_cookie({'name': 'name', 'domain': 'www.zhihu.com', 'value': 'germey'})

print(browser.get_cookies())

browser.delete_all_cookies()

print(browser.get_cookies())

Tab Management

import time

from selenium import webdriver

browser = webdriver.Chrome()

browser.get('https://www.baidu.com')

browser.execute_script('window.open()')

print(browser.window_handles)

browser.switch_to.window(browser.window_handles[1])

browser.get('https://www.taobao.com')

time.sleep(1)

browser.switch_to.window(browser.window_handles[0])

browser.get('https://python.org')
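Note that close() only closes the current tab; after closing one, you must switch to one of the remaining handles before issuing further commands. A minimal sketch:

import time
from selenium import webdriver

browser = webdriver.Chrome()
browser.get('https://www.baidu.com')
browser.execute_script('window.open()')
browser.switch_to.window(browser.window_handles[1])
browser.get('https://www.taobao.com')
time.sleep(1)
# close() only closes the current tab; switch back before continuing
browser.close()
browser.switch_to.window(browser.window_handles[0])
print(browser.current_url)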

Exception Handling

from selenium import webdriver

browser = webdriver.Chrome()

browser.get('https://www.baidu.com')

browser.find_element_by_id('hello')

from selenium import webdriver

from selenium.common.exceptions import TimeoutException, NoSuchElementException

browser = webdriver.Chrome()

try:

    browser.get('https://www.baidu.com')

except TimeoutException:

    print('Time Out')

try:

    browser.find_element_by_id('hello')

except NoSuchElementException:

    print('No Element')

finally:

    browser.close()

Full documentation: http://selenium-python.readthedocs.io/api.html#module-selenium.common.exceptions
