python3下BeautifulSoup练习一(爬取小说)
2017-10-11 00:00
281 查看
from bs4 import BeautifulSoup
import urllib.request
import re
import os,time
def getUrls(url):
    """Collect absolute chapter URLs from a novel's table-of-contents page.

    Fetches *url*, gathers every anchor whose href contains ".html", and
    prefixes the site root to build absolute links.  The first matching
    anchor is skipped (this mirrors the original ``i != 0`` filter;
    presumably it is a "latest chapter" shortcut on the page — confirm
    against the live site layout).

    :param url: table-of-contents page, e.g. 'http://www.ahzww.net/0/178/'
    :return: list of absolute chapter URLs in page order
    """
    req = urllib.request.Request(url)
    # `with` guarantees the HTTP response is closed (the original leaked it).
    with urllib.request.urlopen(req) as page:
        html = page.read()
    soup = BeautifulSoup(html, 'html.parser')
    # BUG FIX: the original pattern '.html' treated '.' as a regex wildcard
    # (matching e.g. 'xhtml'); escape it so only literal ".html" matches.
    anchors = soup.find_all(href=re.compile(r'\.html'))
    # Slice instead of a manual counter: skip the first matched link.
    return ['http://www.ahzww.net' + a['href'] for a in anchors[1:]]
def getContent(url):
    """Fetch one chapter page and return its title and body text.

    Sends a browser-like User-Agent header (the site appears to reject the
    default urllib agent — this mirrors the original opener setup).

    :param url: absolute chapter URL
    :return: tuple ``(title_text, content_text)``
    :raises AttributeError: if the page lacks an <h1> or a div#content
        (``find`` returns None and ``.get_text()`` fails)
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
    }
    # Idiomatic header passing via Request replaces build_opener/addheaders;
    # `with` closes the response (the original leaked it).
    req = urllib.request.Request(url, headers=headers)
    with urllib.request.urlopen(req) as resp:
        html = resp.read()
    soup = BeautifulSoup(html, 'html.parser')
    content = soup.find('div', id='content')
    title = soup.find('h1')
    return title.get_text(), content.get_text()
if __name__ == '__main__':
    # Scrape every chapter of one novel and append it to a text file.
    urls = getUrls('http://www.ahzww.net/0/178/')
    # Explicit UTF-8 avoids UnicodeEncodeError on platforms whose default
    # encoding cannot represent the Chinese text; `with` guarantees the
    # file is closed even if a fetch raises mid-loop.
    with open("不负娇宠.txt", "w", encoding="utf-8") as fp:
        for url in urls:
            print(url)
            title, content = getContent(url)
            fp.write(title + "\n")
            # Turn the page's paragraph padding into line breaks.
            fp.write(content.replace(' ', '\n') + "\n")
            time.sleep(2)  # be polite: throttle requests to the site
    print("Done")
import urllib.request
import re
import os,time
def getUrls(url):
    """Return absolute chapter links scraped from a table-of-contents page.

    Every anchor whose href matches the pattern '.html' is collected
    except the very first one, and each href is joined to the site root
    to form an absolute URL.
    """
    request = urllib.request.Request(url)
    response = urllib.request.urlopen(request)
    document = BeautifulSoup(response.read(), 'html.parser')
    anchors = document.find_all(href=re.compile('.html'))
    # Drop the first match, keep the rest — equivalent to the counter filter.
    return ['http://www.ahzww.net' + a['href'] for a in anchors[1:]]
def getContent(url):
    """Download a chapter page and return ``(title text, body text)``.

    Uses a custom opener carrying a browser User-Agent header.
    """
    ua = ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11')
    opener = urllib.request.build_opener()
    opener.addheaders = [ua]
    page = BeautifulSoup(opener.open(url).read(), 'html.parser')
    body = page.find('div', id='content')
    heading = page.find('h1')
    return heading.get_text(), body.get_text()
if __name__ == '__main__':
    # Walk the chapter list and dump each chapter into one text file.
    chapter_urls = getUrls('http://www.ahzww.net/0/178/')
    out = open("不负娇宠.txt", "w")
    for chapter in chapter_urls:
        print(chapter)
        heading, text = getContent(chapter)
        out.write(heading + "\n")
        out.write(text.replace(' ', '\n') + "\n")
        time.sleep(2)  # throttle between requests
    out.close()
    print("Done")
相关文章推荐
- 【问题】使用BeautifulSoup解析在python2和python3下表现不一样?
- python 爬小说+beautifulsoup安装
- Python+Selenium练习篇之7-利用name定位元素
- python学习(6):python爬虫之requests和BeautifulSoup的使用
- python基础练习---求3*3矩阵主对角线元素之和
- 采集练习(十二) python 采集之 xbmc 酷狗电台插件
- python学习之小的练习程序
- Python--基础练习
- Python 小甲鱼教程 课后练习42
- 某徒步旅游网站python爬虫小练习
- Python BeautifulSoup 简单笔记
- python字典练习
- 读书笔记--用Python写网络爬虫00--建立练习环境
- python的一些练习地址
- Ubuntu系统上Python2和Python3共存时安装BeautifulSoup4
- python核心编程十四章练习
- python项目练习七:自定义公告板
- python练习——用户输入与while循环
- python项目练习一:即时标记
- 笨办法学 Python · 续 练习 47:`bc`