
Getting Started with Python Web Crawlers

2017-12-14 15:33
This is a crawler demo I wrote recently that uses only a few simple functions. It implements basic web page crawling, with Tuniu (tuniu.com) as the example site.

import urllib2
import re
import urlparse
import robotparser
import datetime
import time

class Throttle:
    """
    Add a delay between two downloads to the same domain.
    """
    def __init__(self, delay):
        # minimum delay between downloads of the same domain
        self.delay = delay
        # timestamp of when each domain was last accessed
        self.domains = {}

    def wait(self, url):
        domain = urlparse.urlparse(url).netloc
        last_accessed = self.domains.get(domain)

        if self.delay > 0 and last_accessed is not None:
            sleep_sec = self.delay - (datetime.datetime.now() - last_accessed).seconds

            if sleep_sec >= 0:
                time.sleep(sleep_sec)
                print 'sleep: ', sleep_sec, 's'
        self.domains[domain] = datetime.datetime.now()

def download(url, proxy=None, user_agent='wawp', num_retries=2):
    print 'Downloading:', url
    headers = {'User-agent': user_agent}
    request = urllib2.Request(url, headers=headers)

    opener = urllib2.build_opener()
    if proxy:
        proxy_param = {urlparse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib2.ProxyHandler(proxy_param))
    try:
        html = opener.open(request).read()
    except urllib2.URLError as e:
        print 'Downloading error:', e.reason, '\n'
        html = ''
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # retry only on 5xx server errors
                return download(url, proxy, user_agent, num_retries - 1)
    return html

def get_links(html, regstr=r'http:\/\/[^w].*\.tuniu\.com'):
    reg = regstr
    rexp = re.compile(reg)
    return re.findall(rexp, html)

def deduplicate_list(inputList):
    new_list = []
    for x in inputList:
        if x not in new_list:
            new_list.append(x)
    return new_list

def crawl_sitemap(url):
    sitemap = download(url)
    links = get_links(sitemap)
    print 'before links are : ', links
    newlinks = deduplicate_list(links)
    print 'after links are : ', newlinks

    for link in newlinks:
        print link
        download(link)

def get_robot(url):
    rp = robotparser.RobotFileParser()
    # use an absolute path so robots.txt always resolves to the site root
    rp.set_url(urlparse.urljoin(url, '/robots.txt'))
    rp.read()
    return rp

def link_crawler(seed_url, max_depth=3, link_regex=r'http:\/\/[^w][^"]*\.tuniu\.com', delay=1, proxy=None):
    # parse robots.txt so the crawler can honour its rules
    rp = get_robot(seed_url)
    # init vars
    throttle = Throttle(delay)
    crawl_queue = [seed_url]
    seen = {seed_url: 0}

    while crawl_queue:
        url = crawl_queue.pop()

        depth = seen[url]
        if depth != max_depth:

            if rp.can_fetch('heimaojingzhang', url):  # arbitrary user-agent name, just for fun
                throttle.wait(url)
                html = download(url, proxy)
                #  print 'down func ', url
                for link in get_links(html, link_regex):
                    link = urlparse.urljoin(seed_url, link)
                    if link not in seen:
                        seen[link] = depth + 1
                        crawl_queue.append(link)
            else:
                print 'Blocked by robots.txt ', url

# TODO:
# fix bugs: (in regex) done on : 2017/09/23 23:16
# delay: done on : 2017/09/24 21:36
# proxy
# depth: done on : 2017/09/23 23:10

if __name__ == '__main__':
    link_crawler('http://www.tuniu.com/corp/sitemap.shtml', link_regex=r'http:\/\/www\.tuniu\.com\/guide\/[^"]*')
    #  html = download('http://www.tuniu.com/corp/sitemap.shtml')
    #  print html
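
For reference, a call along these lines exercises the parameters mentioned in the TODO list (depth, delay and proxy). The proxy address is just a made-up placeholder, and the crawl only works as long as tuniu.com still serves this sitemap page:

if __name__ == '__main__':
    # at most 2 levels deep, 2-second delay per domain; the proxy is a placeholder
    link_crawler('http://www.tuniu.com/corp/sitemap.shtml',
                 link_regex=r'http:\/\/www\.tuniu\.com\/guide\/[^"]*',
                 max_depth=2,
                 delay=2,
                 proxy='http://127.0.0.1:8080')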


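Note that the listing above is Python 2 only: urllib2, urlparse and robotparser were folded into urllib.request, urllib.parse and urllib.robotparser in Python 3. As a rough sketch (not a tested drop-in replacement for the whole script), the download function would look roughly like this under Python 3:

import urllib.request
import urllib.error
import urllib.parse

def download(url, proxy=None, user_agent='wawp', num_retries=2):
    print('Downloading:', url)
    request = urllib.request.Request(url, headers={'User-agent': user_agent})
    opener = urllib.request.build_opener()
    if proxy:
        # route requests for this URL's scheme through the given proxy
        scheme = urllib.parse.urlparse(url).scheme
        opener.add_handler(urllib.request.ProxyHandler({scheme: proxy}))
    try:
        html = opener.open(request).read().decode('utf-8', errors='ignore')
    except urllib.error.URLError as e:
        print('Downloading error:', e.reason)
        html = ''
        if num_retries > 0 and hasattr(e, 'code') and 500 <= e.code < 600:
            # retry only on 5xx server errors
            return download(url, proxy, user_agent, num_retries - 1)
    return html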