Practical Python Crawling (1): Scraping Baidu Baike Entries
2016-01-03 11:03
Reference:
Python开发简单爬虫 ("Developing a Simple Crawler in Python")
Overview:
The crawler is split into five parts (a sketch of the assumed package layout follows this list):
spider_main     # main program / entry point
UrlManager      # URL management
HtmlDownloader  # page downloader
HtmlParser      # page parser
HtmlOutputer    # output of the parsed results
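The module file names below come from the imports in spider_main (from BaiduBaike import html_download, html_parser, html_outputer, url_Manager); the post never shows the directory itself, so this layout is an assumption:

BaiduBaike/
    __init__.py        # assumed: makes BaiduBaike importable as a package
    spider_main.py     # SpiderMain, the entry point
    url_Manager.py     # UrlManager
    html_download.py   # HtmlDownloader
    html_parser.py     # HtmlParser
    html_outputer.py   # HtmlOutputer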
Code:
spider_main:
# coding: utf-8
from BaiduBaike import html_download, html_parser, html_outputer, url_Manager


class SpiderMain(object):
    def __init__(self):
        self.urls = url_Manager.UrlManager()
        self.downloader = html_download.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print 'craw %d : %s' % (count, new_url)
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 100:   # stop after 100 pages
                    break
                count = count + 1
            except:
                # any failure (download or parse) just skips this page
                print 'craw failed'
        self.outputer.output_html()


if __name__ == '__main__':
    root_url = 'http://baike.baidu.com/view/21087.htm'
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
UrlManager:
# coding: utf-8


class UrlManager(object):
    def __init__(self):
        self.new_urls = set()   # URLs waiting to be crawled
        self.old_urls = set()   # URLs already crawled

    def add_new_url(self, url):
        if url is None:
            return
        # only queue URLs we have never seen before
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        # move one URL from the pending set to the crawled set
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
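A quick sanity check of the de-duplication logic (a hedged sketch in the same Python 2 style, assuming the package layout above):

# coding: utf-8
from BaiduBaike import url_Manager

manager = url_Manager.UrlManager()
manager.add_new_url('http://baike.baidu.com/view/21087.htm')
manager.add_new_url('http://baike.baidu.com/view/21087.htm')   # duplicate: silently ignored
print manager.has_new_url()   # True: exactly one URL is pending
url = manager.get_new_url()   # pops it and records it in old_urls
manager.add_new_url(url)      # already crawled, so it is not re-queued
print manager.has_new_url()   # False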
HtmlDownloader:
# coding: utf-8
import urllib2


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        response = urllib2.urlopen(url)
        # give up on anything other than HTTP 200
        if response.getcode() != 200:
            return None
        return response.read()
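Note that urllib2 exists only in Python 2 (Python 3 merged it into urllib.request). A minimal usage sketch for the class as written, again assuming the package layout above:

# coding: utf-8
from BaiduBaike import html_download

downloader = html_download.HtmlDownloader()
html = downloader.download('http://baike.baidu.com/view/21087.htm')
# download() returns the raw page bytes, or None for a missing URL
# or a non-200 status code
print html is not None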
HtmlParser:
# coding: utf-8
import re
import urlparse

from bs4 import BeautifulSoup


class HtmlParser(object):
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # entry links look like /view/21087.htm
        links = soup.find_all('a', href=re.compile(r'/view/\d+\.htm'))
        for link in links:
            new_url = link['href']
            # resolve the relative href against the current page URL
            new_full_url = urlparse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title"><h1>entry title</h1></dd>
        title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        res_data['title'] = title_node.get_text()
        # <div class="lemma-summary">entry summary</div>
        summary_node = soup.find('div', class_='lemma-summary')
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
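Baidu Baike's markup has changed since 2016, so the class names the parser looks for (lemmaWgt-lemmaTitle-title, lemma-summary) may no longer match the live site. Against a hand-written fragment in the shape this code expects, the parser behaves like this (a hedged, self-contained sketch; the entry ID 12345 is made up):

# coding: utf-8
from BaiduBaike import html_parser

# a fragment mimicking the markup the parser was written against
page = '''<html><body>
<dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1></dd>
<div class="lemma-summary">Python is a programming language.</div>
<a href="/view/12345.htm">another entry</a>
<a href="/item/other">ignored: href does not match /view/\d+\.htm</a>
</body></html>'''

parser = html_parser.HtmlParser()
new_urls, new_data = parser.parse('http://baike.baidu.com/view/21087.htm', page)
print new_urls            # set(['http://baike.baidu.com/view/12345.htm'])
print new_data['title']   # Python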
HtmlOutputer:
# coding: utf-8


class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('output.html', 'w')
        fout.write('<html>')
        fout.write('<meta charset="utf-8">')
        fout.write('<body>')
        fout.write('<table>')
        # one table row per crawled entry: url / title / summary
        for data in self.datas:
            fout.write('<tr>')
            fout.write('<td>%s</td>' % data['url'])
            fout.write('<td>%s</td>' % data['title'].encode('utf-8'))
            fout.write('<td>%s</td>' % data['summary'].encode('utf-8'))
            fout.write('</tr>')
        fout.write('</table>')
        fout.write('</body>')
        fout.write('</html>')
        fout.close()   # flush the buffered writes to disk
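Finally, a hedged end-to-end check of the outputer, feeding it one record of the shape HtmlParser returns; it writes a three-column (url / title / summary) table to output.html in the working directory:

# coding: utf-8
from BaiduBaike import html_outputer

outputer = html_outputer.HtmlOutputer()
outputer.collect_data({
    'url': 'http://baike.baidu.com/view/21087.htm',
    'title': u'Python',                               # unicode, as get_text() returns
    'summary': u'Python is a programming language.',  # encoded to UTF-8 on write
})
outputer.output_html()   # produces output.html with one table row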
Results:
Running spider_main crawls up to 100 entries starting from the Python entry (view/21087), printing one progress line per page, and writes each page's URL, title, and summary into a table in output.html.