Learning Python Web Scraping --------- Scraping a Quotes Site with BeautifulSoup4
2018-03-28 21:43
Scrape the quotes under the top-10 tags on the quotes site (quotes.toscrape.com) and store them in MySQL, using three fields: quote content, author, and tags.
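The post never shows the table itself, only the three field names, so here is a minimal sketch of a matching table created through pymysql (the column names come from the insert statement below; the id column, types, and lengths are my assumptions):

import pymysql

con = pymysql.connect(host='localhost', user='root', password='root',
                      database='quotes', charset='utf8', port=3306)
cursor = con.cursor()
# Assumed schema: only the three column names are given in the post.
cursor.execute("""
    CREATE TABLE IF NOT EXISTS quotes (
        id      INT AUTO_INCREMENT PRIMARY KEY,
        content VARCHAR(500),
        author  VARCHAR(100),
        tags    VARCHAR(200)
    ) DEFAULT CHARSET=utf8
""")
con.commit()
cursor.close()
con.close()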
#! /usr/bin/python3
# -*- coding:utf-8 -*-
from urllib.request import urlopen   # imported under its own name so the built-in open() is not shadowed
from bs4 import BeautifulSoup
import pymysql
def find_top_ten(url):
    """Collect the hrefs of the "Top Ten tags" links on the home page."""
    response = urlopen(url)
    bs = BeautifulSoup(response, 'html.parser')
    tags = bs.select('span.tag-item a')
    top_ten_href = [tag.get('href') for tag in tags]   # e.g. '/tag/love/'
    # The tag names themselves are just as easy to grab, but only the hrefs are used:
    # top_ten_tag = [tag.text for tag in tags]
    return top_ten_href
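A quick way to sanity-check the function from the interactive prompt (the printed list is illustrative, not captured output):

links = find_top_ten("http://quotes.toscrape.com/")
print(links)   # e.g. ['/tag/love/', '/tag/inspirational/', ...]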
def insert_into_mysql(records):
    """Insert (content, author, tags) records into the quotes table."""
    con = pymysql.connect(host='localhost', user='root', password='root',
                          database='quotes', charset='utf8', port=3306)
    cursor = con.cursor()
    # Parameterized insert; pymysql handles the escaping.
    sql = "insert into quotes(content,author,tags) values(%s,%s,%s)"
    for record in records:
        cursor.execute(sql, record)
    con.commit()
    cursor.close()
    con.close()
# http://quotes.toscrape.com/tag/love/ lists the quotes for one tag. We want
# every quote under each tag, so pagination has to be handled. Inspecting the
# site shows the paginated URL pattern:
#   http://quotes.toscrape.com/tag/love/page/1/
# We keep incrementing the page number and stop at the first page with no
# quote data (the quotes live under div.container div.row).
def find_link_content(link):
    """Walk every page under one tag link and store its quotes."""
    page = 1
    while True:
        new_link = "http://quotes.toscrape.com" + link + "page/" + str(page)
        print(new_link)
        response = urlopen(new_link)
        sub_bs = BeautifulSoup(response, 'html.parser')
        quotes = sub_bs.select('div.row div.col-md-8 span.text')
        # No quotes on this page: we have walked past the last page.
        if len(quotes) == 0:
            break
        # Quote text, with the surrounding curly quotation marks stripped.
        quotes = [quote.text.strip('“”') for quote in quotes]
        # Authors
        authors = sub_bs.select('small.author')
        authors = [author.text for author in authors]
        # Tags: each quote block carries a <meta class="keywords"> element
        # whose content attribute holds the comma-separated tag names.
        tags_list = sub_bs.select('meta.keywords')
        tags_list = [tags.get('content') for tags in tags_list]
        record_list = []
        for i in range(len(quotes)):
            # Swap ASCII commas for full-width ones before storing.
            tags = tags_list[i].replace(',', '，')
            record = [quotes[i], authors[i], tags]
            record_list.append(record)
        insert_into_mysql(record_list)
        page += 1
def main():
    url = "http://quotes.toscrape.com/"
    parent_links = find_top_ten(url)
    for link in parent_links:
        print(link)
        find_link_content(link)

if __name__ == '__main__':
    main()
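One small design note: insert_into_mysql above executes the INSERT once per record. pymysql also offers cursor.executemany, which sends the whole batch in a single call; a minimal variant using the same table and SQL (a sketch, not part of the original post):

def insert_into_mysql(records):
    con = pymysql.connect(host='localhost', user='root', password='root',
                          database='quotes', charset='utf8', port=3306)
    with con.cursor() as cursor:
        # One round trip for the whole page of records.
        cursor.executemany(
            "insert into quotes(content,author,tags) values(%s,%s,%s)",
            records)
    con.commit()
    con.close()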