您的位置:首页 > 编程语言 > Python开发

python爬虫基础封装函数

2018-08-13 20:10 645 查看
# import packages and modules
from urllib import request, parse
from urllib.error import HTTPError, URLError
#保存cookie
from http import cookiejar

# For sites that require login: keep the user's cookies so logged-in state persists across requests.
class session(object):
    """HTTP session that persists cookies across requests.

    Builds a cookie-aware opener once in ``__init__`` and routes every
    ``get``/``post`` through it, so cookies set by one response (e.g. a
    login) are sent on subsequent requests.
    """

    def __init__(self):
        # CookieJar collects Set-Cookie headers from responses.
        cookie_object = cookiejar.CookieJar()
        # Handler wires the jar into the request pipeline.
        # (fixed typo: the class is HTTPCookieProcessor, not HTTPCookueProcessor)
        handler = request.HTTPCookieProcessor(cookie_object)
        # Opener that stores/replays cookies on every request it makes.
        self.opener = request.build_opener(handler)

    def get(self, url, headers=None):
        """GET *url* through the cookie-aware opener; returns bytes."""
        return get(url, headers=headers, opener=self.opener)

    def post(self, url, form, headers=None):
        """POST *form* (a dict) to *url* through the cookie-aware opener.

        Note: the original signature used the reserved word ``from`` as the
        parameter name (a SyntaxError) while the body read ``form``; the
        parameter is now ``form`` to match the body and the module-level
        ``post`` helper.
        """
        return post(url, form, headers=headers, opener=self.opener)

# 定义函数
# get方法
def get(url, headers=None, opener=None):
    """Issue a GET request to *url*; returns the response body as bytes.

    ``opener`` (optional) is a cookie-aware opener from ``session``; the
    original signature lacked it, so ``session.get`` could not forward its
    opener. Defaults to ``None`` (plain ``urlopen``), keeping the old
    two-argument call compatible.
    """
    return urlrequests(url, headers=headers, opener=opener)

# post方法
def post(url, form, headers=None, opener=None):
    """Issue a POST request to *url* with *form* (dict); returns bytes.

    ``opener`` (optional) is a cookie-aware opener from ``session``; the
    original signature lacked it, so ``session.post`` could not forward its
    opener. Defaults to ``None`` (plain ``urlopen``), keeping the old
    three-argument call compatible.
    """
    return urlrequests(url, form, headers=headers, opener=opener)

# 进行封装函数
# 1.传入url
# 2.user_agent
# 3.headers
# 4.定义Request
# 5.urlopen
# 6.返回byte数组

def urlrequests(url, form=None, headers=None, opener=None):
    """Fetch *url* and return the response body as bytes.

    1. Fill in a default User-Agent header when none is supplied.
    2. POST the urlencoded *form* when given, otherwise GET.
    3. Open via *opener* (cookie-aware, from ``session``) when provided,
       otherwise via plain ``request.urlopen``.
    4. On HTTPError/URLError, print the error and return ``b''``
       (best-effort behavior kept from the original).
    """
    user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
    if headers is None:
        headers = {
            'User-Agent': user_agent
        }
    html_bytes = b''
    try:
        if form:
            # POST: encode the form dict to a query string, then to bytes.
            # (fixed bug: original assigned `from_str` but read `form_str`,
            # so every POST raised NameError)
            form_str = parse.urlencode(form)
            form_bytes = form_str.encode('utf-8')
            req = request.Request(url, data=form_bytes, headers=headers)
        else:
            # GET
            req = request.Request(url, headers=headers)

        # Use the session's cookie-aware opener when one is supplied.
        open_func = opener.open if opener is not None else request.urlopen
        response = open_func(req)
        html_bytes = response.read()

    except HTTPError as e:
        print(e)
    except URLError as e:
        print(e)
    return html_bytes
if __name__ == '__main__':
    # Baidu Translate (POST example):
    # url = 'http://fanyi.baidu.com/sug'
    # form = {
    #     'kw': 'text to translate'
    # }
    # html_bytes = post(url, form=form)
    # print(html_bytes)

    url = 'http://www.baidu.com'
    # GET request
    html_byte = get(url)
    print(html_byte)
阅读更多
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: