Web Scraping Study 5.1.2: Data Storage (Without a Database), Saving as CSV
2017-10-15 15:19
1. Writing a CSV file
import csv
headers=['ID','Username','Password','Age','Country']
rows=[(1001,"qiye","qiye_pass","24","China"),
(1002, "Marry", "Marry_pass", "21", "USA"),
(1003, "Jack", "Jack_pass", "20", "USA"),]
with open('qiye.csv','w') as f:
    f_csv = csv.writer(f)
    f_csv.writerow(headers)
    f_csv.writerows(rows)
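One detail worth noting about the write above: the Python 2 csv docs ask for the file to be opened in binary mode, otherwise extra blank lines can show up between rows on Windows (on Python 3 the equivalent fix is opening in text mode with newline=''). A minimal sketch of the same write done that way:
import csv
headers = ['ID', 'Username', 'Password', 'Age', 'Country']
rows = [(1001, "qiye", "qiye_pass", "24", "China")]  # same shape as the data above, trimmed to one row
# 'wb' lets the csv writer control the line endings itself
with open('qiye.csv', 'wb') as f:
    f_csv = csv.writer(f)
    f_csv.writerow(headers)
    f_csv.writerows(rows)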
2. Dictionary data
# the items in the rows list can also be dictionaries
import csv
headers=['ID','Username','Password','Age','Country']
rows=[{'ID':1001,'Username':"qiye",'Password':"qiye_pass",'Age':"24",'Country':"China"},
{'ID':1002,'Username':"Marry",'Password':"Marry_pass",'Age':"21",'Country':"USA"},
{'ID':1003,'Username':"Jack",'Password':"Jack_pass",'Age':"20",'Country':"USA"},]
with open('qiye2.csv','w') as f:
    f_csv = csv.DictWriter(f, headers)
    f_csv.writeheader()
    f_csv.writerows(rows)
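DictWriter also takes restval and extrasaction arguments for rows whose keys do not line up with the header exactly. A small sketch with made-up rows and an illustrative output file name (qiye3.csv):
import csv
headers = ['ID', 'Username', 'Country']
rows = [{'ID': 1004, 'Username': "Tom"},                                 # 'Country' is missing
        {'ID': 1005, 'Username': "Lily", 'Country': "UK", 'Age': "30"}]  # 'Age' is not in the header
with open('qiye3.csv', 'w') as f:
    # restval fills in missing keys; extrasaction='ignore' silently drops unexpected ones
    f_csv = csv.DictWriter(f, headers, restval='N/A', extrasaction='ignore')
    f_csv.writeheader()
    f_csv.writerows(rows)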
3. Reading data (as lists)
import csv
with open('qiye.csv') as f:
    f_csv = csv.reader(f)
    headers = next(f_csv)
    print headers
    for row in f_csv:
        print row
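Since csv.reader returns each row as a plain list, individual columns can only be reached here by numeric position. A short sketch against the qiye.csv layout written earlier:
import csv
with open('qiye.csv') as f:
    f_csv = csv.reader(f)
    next(f_csv)  # skip the header row
    for row in f_csv:
        # in the layout above, row[1] is Username and row[2] is Password
        print row[1], row[2]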
4. Reading data (indexed access)
A detailed write-up of Python's namedtuple: http://blog.csdn.net/kongxx/article/details/51553362
Name the fields first, then fill in each row and pull the values back out by field name.
import csv
from collections import namedtuple
with open('qiye2.csv') as f:
    f_csv = csv.reader(f)
    headings = next(f_csv)             # the first row of the file is the header
    Row = namedtuple('Row', headings)  # define a namedtuple type from the header fields
    for r in f_csv:
        row = Row(*r)                  # build one object per data row
        print row.Username, row.Password
        print row
5. Reading into a sequence of dictionaries
import csv
with open('qiye.csv') as f:
    f_csv = csv.DictReader(f)
    for row in f_csv:
        print row.get('Username'), row.get('Password')
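Because DictReader yields one dictionary per row, it is easy to filter rows by column name and write the survivors back out with DictWriter. A sketch assuming the qiye.csv produced earlier; the output file name qiye_usa.csv is made up for this example:
import csv
headers = ['ID', 'Username', 'Password', 'Age', 'Country']
with open('qiye.csv') as f:
    # keep only the rows whose Country column is USA
    usa_rows = [row for row in csv.DictReader(f) if row.get('Country') == 'USA']
with open('qiye_usa.csv', 'w') as f:
    f_csv = csv.DictWriter(f, headers)
    f_csv.writeheader()
    f_csv.writerows(usa_rows)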
6. Using lxml to parse the http://seputu.com homepage for titles, chapters, links, and other data
#coding:utf-8
from lxml import etree
import requests
import re
import csv

user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = {'User-Agent': user_agent}
r = requests.get('http://seputu.com', headers=headers)
# parse the page with lxml
html = etree.HTML(r.text)
# first find every div with class="mulu" (one per catalogue block)
div_mulus = html.xpath('.//*[@class="mulu"]')
# each title attribute looks like "[date] real title"; capture the two parts
pattern = re.compile(r'\s*\[(.*)\]\s+(.*)')
rows = []
for div_mulu in div_mulus:
    # the h2 inside mulu-title holds the section title
    div_h2 = div_mulu.xpath('./div[@class="mulu-title"]/center/h2/text()')
    if len(div_h2) > 0:
        h2_title = div_h2[0].encode('utf-8')
        a_s = div_mulu.xpath('./div[@class="box"]/ul/li/a')
        for a in a_s:
            # get the href attribute
            href = a.xpath('./@href')[0].encode('utf-8')
            # get the title attribute
            box_title = a.xpath('./@title')[0]
            match = pattern.search(box_title)
            if match is not None:
                date = match.group(1).encode('utf-8')
                real_title = match.group(2).encode('utf-8')
                #print real_title
                content = (h2_title, real_title, href, date)
                print content
                rows.append(content)
# column names for the CSV output (reusing the headers variable)
headers = ['title', 'real_title', 'href', 'date']
with open('qiye.csv', 'w') as f:
    f_csv = csv.writer(f)
    f_csv.writerow(headers)
    f_csv.writerows(rows)
print 'success!'
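The regular expression in this script assumes each title attribute has the form "[date] real title". A quick standalone check of that pattern on a made-up title string:
import re
pattern = re.compile(r'\s*\[(.*)\]\s+(.*)')
box_title = '[2012-5-23 21:14:42] Chapter 1'  # hypothetical title in the "[date] title" format
match = pattern.search(box_title)
if match is not None:
    print match.group(1)  # the date part: 2012-5-23 21:14:42
    print match.group(2)  # the real title: Chapter 1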