Python crawler: scraping Lianjia housing price listings (to be continued)
2017-04-01 15:23
items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class LianjiaItem(scrapy.Item):
    info1 = scrapy.Field()        # listing title (community name)
    info1_url = scrapy.Field()    # URL of the listing's detail page
    info2 = scrapy.Field()        # house info text
    info2_dizhi = scrapy.Field()  # position/address text
    info3 = scrapy.Field()        # area name from the position block
    info4 = scrapy.Field()        # follow-up info text
settings.py
Only one setting needs attention here:
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
This must be set to False: some sites disallow crawlers in their robots.txt, and with the default True Scrapy would obey that file and skip those pages.
# -*- coding: utf-8 -*-
# Scrapy settings for lianjia project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'lianjia'
SPIDER_MODULES = ['lianjia.spiders']
NEWSPIDER_MODULE = 'lianjia.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'lianjia (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'lianjia.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'lianjia.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'lianjia.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
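The ITEM_PIPELINES block above is left commented out. As a minimal sketch of what a pipeline could look like once the spider actually yields items (the class name and output filename below are illustrative, not part of this project), a JSON-lines writer in lianjia/pipelines.py might be:

# -*- coding: utf-8 -*-
# pipelines.py -- hypothetical sketch, not part of the original project
import json
import codecs


class LianjiaPipeline(object):
    def open_spider(self, spider):
        # open the output file once when the spider starts
        self.file = codecs.open('lianjia_items.jl', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        # write each scraped item as one JSON line
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item

    def close_spider(self, spider):
        self.file.close()

Enabling it would mean uncommenting ITEM_PIPELINES and registering 'lianjia.pipelines.LianjiaPipeline': 300.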
lianjia_spider.py (avoid naming the spider file scrapy.py: inside the package that name shadows the scrapy library and breaks import scrapy)
# coding: utf-8
import scrapy
from scrapy.http import Request
from lianjia.items import LianjiaItem


class Myspider(scrapy.Spider):
    name = 'lianjia'
    allowed_domains = ['lianjia.com']
    base_url = 'http://bj.lianjia.com/ershoufang/tongzhou/pg'

    def start_requests(self):
        # list pages are paginated as pg1, pg2, ...; only page 1 for now
        for i in range(1, 2):
            url = self.base_url + str(i)
            yield Request(url, self.parse)

    def parse(self, response):
        print response.url
        for info in response.xpath('//div[4]/div[1]/ul/li'):
            # build a fresh item per listing so the item carried in meta
            # is not overwritten by the next iteration
            items = LianjiaItem()
            items['info1'] = info.xpath('div/div[1]/a/text()').extract_first()         # listing title
            items['info1_url'] = info.xpath('div/div[1]/a/@href').extract_first()      # detail-page URL
            items['info2'] = info.xpath('div/div[2]/div/text()').extract_first()       # house info
            items['info2_dizhi'] = info.xpath('div/div[3]/div/text()').extract_first() # position text
            items['info3'] = info.xpath('div/div[3]/div/a/text()').extract_first()     # area name
            items['info4'] = info.xpath('div/div[4]/text()').extract_first()           # follow info
            # follow the listing's detail page, passing the item along in meta
            yield Request(items['info1_url'], callback=self.parse_mainurl,
                          meta={'items': items})

    def parse_mainurl(self, response):
        items = response.meta['items']
        print response.url
        print items['info1'], items['info1_url']
        print items['info2']
        print items['info2_dizhi'], items['info3']
        print items['info4']
        print response.xpath('//div[3]/div/div/a/text()').extract_first()
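To try the spider, run Scrapy's crawl command from the project root; -o is Scrapy's standard feed-export flag, and the output filename here is just an example. Note that as written the spider only prints the fields in parse_mainurl, so for -o to capture anything that callback would need to end with yield items instead.

scrapy crawl lianjia -o tongzhou.jl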