
BeautifulSoup practice 1 in Python 3 (scraping a novel)

2017-10-11
from bs4 import BeautifulSoup
import urllib.request
import re
import time

def getUrls(url):
    """Collect the absolute URLs of all chapter pages from the catalogue page."""
    urls = []
    # url = 'http://www.ahzww.net/0/178/'
    req = urllib.request.Request(url)
    page = urllib.request.urlopen(req)
    html = page.read()

    soup = BeautifulSoup(html, 'html.parser')

    # Chapter links on the catalogue page end in ".html";
    # the first matching link is skipped, the rest are turned into absolute URLs.
    i = 0
    for k in soup.find_all(href=re.compile(r'\.html')):
        # print('www.qu.la' + k['href'], k.get_text())
        if i != 0:
            urls.append('http://www.ahzww.net' + k['href'])
        i = i + 1
    return urls
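A quick way to sanity-check the catalogue parsing is to call getUrls on the index page and look at the first few results before scraping every chapter. The snippet below is only an illustrative check (the URL is the same catalogue page used in the script; the printed count and links depend on the site's current markup):

chapter_urls = getUrls('http://www.ahzww.net/0/178/')
print(len(chapter_urls), 'chapter links found')
for u in chapter_urls[:5]:   # preview the first five chapter URLs
    print(u)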

def getContent(url):
    """Fetch one chapter page and return its title and body text."""
    # url = 'http://www.ahzww.net/0/178/355185.html'
    # Send a browser-like User-Agent so the site does not reject the request
    headers = ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11')
    opener = urllib.request.build_opener()
    opener.addheaders = [headers]
    html = opener.open(url).read()

    soup = BeautifulSoup(html, 'html.parser')
    content = soup.find('div', id='content')  # chapter body lives in <div id="content">
    title = soup.find('h1')                   # chapter title is the page's <h1>
    return title.get_text(), content.get_text()
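Likewise, getContent can be tried on a single chapter first to confirm that the page really exposes its title in an <h1> tag and its body in a div with id='content'. A minimal sketch, using the sample chapter URL left in the comment above:

title, content = getContent('http://www.ahzww.net/0/178/355185.html')
print(title)
print(content[:200])   # preview the first 200 characters of the chapter text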

if __name__ == '__main__':
    urls = getUrls('http://www.ahzww.net/0/178/')
    # print(urls)
    # Write the whole novel into one UTF-8 text file
    with open("不负娇宠.txt", "w", encoding="utf-8") as fp:
        for url in urls:
            print(url)
            title, content = getContent(url)
            fp.write(title + "\n")
            # Turn the runs of spaces between paragraphs into line breaks
            fp.write(content.replace('     ', '\n') + "\n")
            time.sleep(2)  # pause between requests to avoid hammering the site
    print("Done")