A Simple Python Web Crawler
2016-04-09 12:25
Following a tutorial from 慕课网 (imooc), I implemented a simple crawler. This Python crawler scrapes the names and summaries of entries related to the Python entry on Baidu Baike and saves them to a file named output.html.
The crawler consists of five parts: spyder_main.py, html_downloader.py, html_outputer.py, html_parser.py, and url_manager.py.
spyder_main.py: the entry point, which coordinates the other modules
html_downloader.py: downloads the page at a given URL
html_outputer.py: collects the scraped data and writes it out as an HTML file
html_parser.py: parses a downloaded page for new URLs and the data of interest
url_manager.py: keeps track of which URLs have been downloaded and which are still pending
The full code follows. Note that it targets Python 2: it uses print statements, urllib2, and urlparse.
spyder_main.py
import url_manager
import html_downloader
import html_outputer
import html_parser


class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print 'craw %d: %s' % (count, new_url)
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 10:  # stop after ten pages
                    break
                count = count + 1
            except:
                print 'craw failed'
        self.outputer.output_html()


if __name__ == '__main__':
    # root_url is the Baidu Baike page for the Python entry
    root_url = 'http://baike.baidu.com/link?url=iYGd6RePOS1xkyTpV0OoSrZ96YRjaPmaxxLgEH4yQFlepo11sn5g4E0oi6cu1hihq651LtUSr9ZVjIY4ePnPla'
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
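One design choice worth noting: craw caps the run at ten pages via the count check, so it terminates quickly. Raising that limit (or removing the break) makes the spider follow more of the /view/ links collected by the parser.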
html_downloader.py
import urllib2


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        response = urllib2.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read()
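The downloader above is Python 2 only, since urllib2 was split into urllib.request and urllib.error in Python 3. As a rough sketch, a Python 3 version of the same class would look like this (the rest of the crawler would need the same porting treatment):

import urllib.request


# Hypothetical Python 3 port of HtmlDownloader: urllib.request.urlopen
# replaces urllib2.urlopen; the logic is otherwise unchanged.
class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        response = urllib.request.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read()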
html_outputer.py
class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)
        return self.datas

    def output_html(self):
        fout = open('output.html', 'w')
        fout.write('<html>')
        fout.write('<meta charset="utf-8">')
        fout.write('<body>')
        fout.write('<table>')
        for data in self.datas:
            fout.write('<tr>')
            fout.write('<td>%s</td>' % data['url'])
            fout.write('<td>%s</td>' % data['title'].encode('utf-8'))
            fout.write('<td>%s</td>' % data['summary'].encode('utf-8'))
            fout.write('</tr>')
        fout.write('</table>')
        fout.write('</body>')
        fout.write('</html>')
        fout.close()  # close the file so buffered writes are flushed to disk
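A detail worth knowing: the .encode('utf-8') calls are needed because BeautifulSoup's get_text() returns unicode strings, while a plain Python 2 file object only accepts byte strings. An alternative sketch is to open the file through the standard-library codecs module, which encodes on write (the file name and sample value here are just for demonstration):

import codecs

# codecs.open returns a file object that encodes unicode on write,
# so no per-field .encode('utf-8') calls are needed.
fout = codecs.open('demo.html', 'w', encoding='utf-8')
fout.write(u'<td>%s</td>' % u'some unicode title')
fout.close()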
html_parser.py
from bs4 import BeautifulSoup
import re
import urlparse


class HtmlParser(object):
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # entry links look like /view/123.htm
        links = soup.find_all('a', href=re.compile(r"/view/\d+\.htm"))
        for link in links:
            new_url = link['href']
            new_full_url = urlparse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1>
        title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        res_data['title'] = title_node.get_text()
        # <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find('div', class_='lemma-summary')
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return None, None  # keep the two-value contract so the caller can unpack
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
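The key step in _get_new_urls is urlparse.urljoin, which resolves the root-relative hrefs found on the page against the URL of the page they came from. A quick illustration (the entry id and query string are made up):

import urlparse

# A root-relative href such as /view/123.htm replaces the path and
# query of the page URL, yielding an absolute entry URL.
print urlparse.urljoin('http://baike.baidu.com/link?url=abc', '/view/123.htm')
# prints: http://baike.baidu.com/view/123.htm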
url_manager.py
class UrlManager(object):
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
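A quick sketch of how the manager deduplicates (the URL is a placeholder): once a URL has been handed out by get_new_url it lives in old_urls, so adding it again has no effect.

manager = UrlManager()
manager.add_new_url('http://baike.baidu.com/view/1.htm')
manager.add_new_url('http://baike.baidu.com/view/1.htm')  # duplicate, ignored
url = manager.get_new_url()  # returns the URL and marks it as old
manager.add_new_url(url)     # already in old_urls, ignored
print manager.has_new_url()  # False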