Downloading web pages with urllib2
Method 1 (the simplest): use urllib2.urlopen()
# In Python 3, urllib.request replaces urllib2
import urllib.request
# Target URL
url = 'http://www.baidu.com'
# Make the request directly
response = urllib.request.urlopen(url)
# Get the status code; 200 means the fetch succeeded.
print(response.getcode())
# Read the response body as bytes
cont = response.read()
#print(cont)
# Fetch and decode with an explicit encoding, option 1: a context manager that closes the response
with urllib.request.urlopen(url) as resp:
    html = resp.read().decode('utf-8')
    #print(html)
# Fetch and decode with an explicit encoding, option 2
f = urllib.request.urlopen(url)
#print(f.read().decode('utf-8'))  # printing may still fail when the console
# encoding (e.g. GBK on Windows) cannot represent some decoded characters
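If you'd rather not hard-code the charset, the server usually declares it in the Content-Type response header; a minimal sketch (the 'utf-8' fallback is an assumption for servers that omit the charset):
import urllib.request
with urllib.request.urlopen('http://www.baidu.com') as resp:
    # get_content_charset() parses the charset out of the Content-Type header
    charset = resp.headers.get_content_charset() or 'utf-8'
    html = resp.read().decode(charset)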
Method 2: adding data and HTTP headers
- data: data supplied by the user (for PUT/CGI/POST/GET requests)
- http header: header information submitted to the server
PUT request - sends the message body to a URL; similar to POST, but rarely used.
import urllib.request
DATA = b'some data'
# Create the Request object with an explicit HTTP method
req = urllib.request.Request(url=url, data=DATA, method='PUT')
response = urllib.request.urlopen(req)
print(response.status)
print(response.reason)
Sending a data request for a CGI program to handle
CGI (Common Gateway Interface) is a standard for programs that run on a server (e.g. an HTTP server) and act as the interface between the server and the client's HTML pages.
import urllib.request
req = urllib.request.Request(url=url, data=b'This is a CGI')
f = urllib.request.urlopen(req)
#print(f.read().decode('utf-8'))
GET request
import urllib.request
import urllib.parse
params = urllib.parse.urlencode({'wd':'PythonGET请求'})
print(params)  # wd=PythonGET%E8%AF%B7%E6%B1%82 (non-ASCII text is percent-encoded)
f = urllib.request.urlopen('http://www.baidu.com/s?%s' % params)
#print(f.read().decode('utf-8'))
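urlencode also handles several parameters at once and joins them with '&'; a quick sketch ('pn' is just an illustrative second parameter, not one the page above necessarily expects):
import urllib.parse
params = urllib.parse.urlencode({'wd': 'Python', 'pn': 10})
print(params)  # wd=Python&pn=10
url_with_query = 'http://www.baidu.com/s?' + params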
POST request
# Option 1
import urllib.request
import urllib.parse
data = urllib.parse.urlencode({'wd':"PythonPOST请求"})
data = data.encode('utf-8')
request = urllib.request.Request('http://www.baidu.com/s?')
# Add Content-Type and User-Agent headers to the request
request.add_header("Content-Type", "application/x-www-form-urlencoded")
request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko')
f = urllib.request.urlopen(request, data)
#print(f.read().decode('utf-8'))
# Option 2
from urllib import request, parse
textmod = parse.urlencode({"wd": "PythonPOST请求"}).encode('utf-8')
header_dict = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko', "Content-Type": "application/x-www-form-urlencoded"}
url='http://www.baidu.com/s?'
req = request.Request(url=url,data=textmod,headers=header_dict)
res = request.urlopen(req)
print(res.getcode())
#print(res.read().decode('utf-8'))
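A urlencoded body like the ones above pairs with Content-Type application/x-www-form-urlencoded; 'application/json' is for a JSON body. If you actually want to send JSON, build the body with the json module instead; a minimal sketch (httpbin.org is used here only as a convenient echo service):
import json
import urllib.request
payload = json.dumps({'wd': 'PythonPOST'}).encode('utf-8')
req = urllib.request.Request('http://httpbin.org/post',
                             data=payload,
                             headers={'Content-Type': 'application/json'})
res = urllib.request.urlopen(req)
#print(res.read().decode('utf-8'))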
Adding HTTP headers
# Add HTTP headers via a Request object
import urllib.request
req = urllib.request.Request('http://www.example.com/')
req.add_header('Referer', 'http://www.python.org/')
r = urllib.request.urlopen(req)
# Add a User-Agent via an opener
import urllib.request
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
opener.open('http://www.example.com/')
Method 3: adding handlers for special scenarios
- HTTPCookieProcessor: for pages that require a login (cookie handling)
- ProxyHandler: for requests that must go through a proxy
- HTTPSHandler: for HTTPS encrypted access (see the sketch after this list)
- HTTPRedirectHandler: for URLs that redirect automatically
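A minimal HTTPSHandler sketch, since it is not demonstrated below: it accepts an ssl.SSLContext through its context argument (the target URL is just an example):
import ssl
import urllib.request
# Build an opener whose HTTPS connections use an explicit SSL context
context = ssl.create_default_context()
https_handler = urllib.request.HTTPSHandler(context=context)
opener = urllib.request.build_opener(https_handler)
f = opener.open('https://www.python.org/')
#print(f.read().decode('utf-8'))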
Basic HTTP authentication, a login request
import urllib.request
# Create a basic HTTP auth handler for use with an OpenerDirector
auth_handler = urllib.request.HTTPBasicAuthHandler()
auth_handler.add_password(realm='PDQ Application',
                          uri='https://mahler:8092/site-updates.py',
                          user='klem',
                          passwd='kadidd!ehopper')
opener = urllib.request.build_opener(auth_handler)
urllib.request.install_opener(opener)
urllib.request.urlopen('http://mail.163.com/')
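If the realm is not known in advance, HTTPPasswordMgrWithDefaultRealm lets you register credentials with realm=None, meaning they are offered for whatever realm the server sends; a minimal variant of the example above:
import urllib.request
password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
# None as the realm means: use these credentials for any realm at this URI
password_mgr.add_password(None, 'https://mahler:8092/', 'klem', 'kadidd!ehopper')
auth_handler = urllib.request.HTTPBasicAuthHandler(password_mgr)
opener = urllib.request.build_opener(auth_handler)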
HTTPCookieProcessor
import urllib.request, http.cookiejar
# Create a cookie container
cj = http.cookiejar.CookieJar()
# Create an opener that carries the cookie jar
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
# Install the opener on urllib.request
urllib.request.install_opener(opener)
# Fetch the page; urllib.request now sends and stores cookies
response = urllib.request.urlopen('http://www.baidu.com')
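After the request, the CookieJar holds whatever cookies the server set, and it can be iterated directly:
# Inspect the cookies captured during the request
for cookie in cj:
    print(cookie.name, '=', cookie.value)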
ProxyHandler
# Request through a specified proxy
import urllib.request
# '127.0.0.1:8080' is a placeholder; substitute a real proxy address
proxy_handler = urllib.request.ProxyHandler({'http': 'http://127.0.0.1:8080'})
opener = urllib.request.build_opener(proxy_handler)
f = opener.open(url)
#print(f.read().decode('utf-8'))
# No proxy: an empty mapping disables proxying entirely
no_proxy_handler = urllib.request.ProxyHandler({})
opener = urllib.request.build_opener(no_proxy_handler)
f = opener.open(url)
#print(f.read().decode('utf-8'))
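When ProxyHandler is built with no argument, it reads proxy settings from environment variables such as http_proxy; urllib.request.getproxies() shows what would be picked up:
import urllib.request
# Proxies detected from the environment (may be an empty dict)
print(urllib.request.getproxies())
# A ProxyHandler constructed with no argument uses exactly these settings
default_proxy_handler = urllib.request.ProxyHandler()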
HTTPRedirectHandler
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Filename: states_code.py
import urllib.error
import urllib.request

class RedirectHandler(urllib.request.HTTPRedirectHandler):
    """Swallow 301/302 responses so redirects are not followed."""
    def http_error_301(self, req, fp, code, msg, headers):
        pass
    def http_error_302(self, req, fp, code, msg, headers):
        pass

def getUnRedirectUrl(url, timeout=10):
    req = urllib.request.Request(url)
    debug_handler = urllib.request.HTTPHandler(debuglevel=1)
    opener = urllib.request.build_opener(debug_handler, RedirectHandler)
    html = None
    response = None
    error_info = None
    try:
        response = opener.open(req, timeout=timeout)
        html = response.read()
    except urllib.error.URLError as e:
        # HTTPError (a URLError subclass) carries a status code; a bare URLError only a reason
        if hasattr(e, 'code'):
            error_info = e.code
        elif hasattr(e, 'reason'):
            error_info = e.reason
    finally:
        if response:
            response.close()
    if html:
        return html
    else:
        return error_info

html = getUnRedirectUrl('http://jb51.net')
print(html)