Python + BeautifulSoup: multi-threaded web-page fetching and parsing (python beautifulsoup多线程分析抓取网页)
2015-08-31 17:00
519 views (查看)
#encoding=utf-8
#@description:蜘蛛抓取内容。
import Queue
import threading
import urllib,urllib2
import time
from BeautifulSoup import BeautifulSoup
hosts = ["http://www.baidu.com","http://www.163.com"]#要抓取的网页
queue = Queue.Queue()
out_queue = Queue.Queue()
class ThreadUrl(threading.Thread):
    """Fetcher thread: download the body of each URL taken from ``queue``.

    Pulls host URLs off ``queue``, fetches the page through an
    (optionally proxied) urllib2 opener, and puts the raw HTML onto
    ``out_queue`` for the parser thread.
    """
    def __init__(self, queue, out_queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.out_queue = out_queue
        # Build the proxy-aware opener ONCE, not on every request.
        # NOTE(review): 'http://xxx.xxx.xxx.xxxx' is a placeholder proxy IP
        # (代理IP) -- replace it with a real proxy or drop the ProxyHandler.
        proxy_support = urllib2.ProxyHandler({'http': 'http://xxx.xxx.xxx.xxxx'})
        self.opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler)

    def run(self):
        while True:
            # grab a host from the queue (blocks until one is available)
            host = self.queue.get()
            try:
                # BUG FIX: the original fetched with urllib.urlopen(), which
                # ignores the urllib2 opener entirely, so the configured proxy
                # was never used.  Open through the opener instead.
                chunk = self.opener.open(host).read()
                # hand the raw page body to the parser thread
                self.out_queue.put(chunk)
            finally:
                # always mark the item done, even on a failed fetch,
                # so queue.join() in main() cannot deadlock
                self.queue.task_done()
class DatamineThread(threading.Thread):
"""Threaded Url Grab"""
def __init__(self, out_queue):
threading.Thread.__init__(self)
self.out_queue = out_queue
def run(self):
while True:
#grabs host from queue
chunk = self.out_queue.get()
#parse the chunk
soup = BeautifulSoup(chunk)
print soup.findAll(['title']))
#signals to queue job is done
self.out_queue.task_done()
start = time.time()
def main():
    """Wire up the fetch/parse pipeline and block until it drains."""
    # BUG FIX: the original comment promised a "pool of threads" but
    # started exactly one fetcher.  Start one fetcher per host so the
    # downloads actually overlap.
    for _ in range(len(hosts)):
        t = ThreadUrl(queue, out_queue)
        t.setDaemon(True)  # daemon: the endless run() loop must not block exit
        t.start()
    # feed the fetchers the hosts to crawl
    for host in hosts:
        queue.put(host)
    # a single parser thread consumes the downloaded pages
    dt = DatamineThread(out_queue)
    dt.setDaemon(True)
    dt.start()
    # wait until every host is fetched and every page is parsed
    queue.join()
    out_queue.join()
# Run the pipeline, then report how long the whole crawl took.
main()
print "Elapsed Time: %s" % (time.time() - start)
#@description:蜘蛛抓取内容。
import Queue
import threading
import urllib,urllib2
import time
from BeautifulSoup import BeautifulSoup
hosts = ["http://www.baidu.com","http://www.163.com"]#要抓取的网页
queue = Queue.Queue()
out_queue = Queue.Queue()
class ThreadUrl(threading.Thread):
    """Downloader thread.

    Takes host URLs from ``queue``, retrieves each page via an
    (optionally proxied) urllib2 opener, and forwards the raw HTML to
    ``out_queue``.
    """
    def __init__(self, queue, out_queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.out_queue = out_queue
        # Construct the opener a single time instead of once per request.
        # NOTE(review): the proxy address (代理IP) is a placeholder --
        # substitute a real proxy or remove the ProxyHandler entirely.
        handler = urllib2.ProxyHandler({'http': 'http://xxx.xxx.xxx.xxxx'})
        self.opener = urllib2.build_opener(handler, urllib2.HTTPHandler)

    def run(self):
        while True:
            # blocking get of the next host to download
            host = self.queue.get()
            try:
                # BUG FIX: urllib.urlopen() bypasses the urllib2 opener, so
                # the proxy configured above was never applied.  Fetch via
                # the opener so the proxy is honoured.
                page = self.opener.open(host).read()
                self.out_queue.put(page)
            finally:
                # mark the work item done unconditionally so queue.join()
                # cannot hang on a failed download
                self.queue.task_done()
class DatamineThread(threading.Thread):
"""Threaded Url Grab"""
def __init__(self, out_queue):
threading.Thread.__init__(self)
self.out_queue = out_queue
def run(self):
while True:
#grabs host from queue
chunk = self.out_queue.get()
#parse the chunk
soup = BeautifulSoup(chunk)
print soup.findAll(['title']))
#signals to queue job is done
self.out_queue.task_done()
start = time.time()
def main():
    """Start the downloader pool and the parser, then wait for both queues."""
    # BUG FIX: despite the "spawn a pool of threads" comment, the original
    # created only one downloader.  Create one per host instead.
    for _ in range(len(hosts)):
        worker = ThreadUrl(queue, out_queue)
        worker.setDaemon(True)  # daemonized: run() never returns on its own
        worker.start()
    # enqueue every host for the downloader pool
    for host in hosts:
        queue.put(host)
    # one parser thread drains the downloaded pages
    miner = DatamineThread(out_queue)
    miner.setDaemon(True)
    miner.start()
    # block until all downloads and all parses are acknowledged
    queue.join()
    out_queue.join()
# Execute the crawl and print the total wall-clock time it took.
main()
print "Elapsed Time: %s" % (time.time() - start)
相关文章推荐
- 实例Python处理XML文件的方法
- 【Python爬虫学习笔记(3)】Beautiful Soup库相关知识点总结
- 深入Python(4):深拷贝和浅拷贝
- 观察者模式
- Python 里不能直接赋值方式修改 sys.argv
- python相关资料分享
- python文件操作
- Python学习----函数
- 使用Python的requests库作接口测试——身份认证
- 使用Python的requests库作接口测试——响应头中的链接字段
- python学习-类理解
- Python装饰器
- 都是python写的渗透测试工具
- Python OS
- Python Time
- Python内置函数
- 使用Python的requests库作接口测试——对HPPT动词的支持
- Python中动态添加类的成员
- python用到CMD的几个文件夹操作命令
- Python之yield