
Making a Simple Crawler with Python: The Basic Idea of a Crawler

2016-05-05 14:47
Taking http://rmfygg.court.gov.cn/psca/lgnot/bulletin/page/0_0.html as an example, we crawl only one level deep; this example is simply meant to illustrate the basic idea behind a crawler. Here is the code first:

# -*- coding: utf-8 -*-

from bs4 import BeautifulSoup
import requests
import re
import Queue            # Python 2 standard-library queue (thread-safe); renamed "queue" in Python 3
import time
import threading
import json
import codecs
"""
isCourtPub = Field()       #是否是法院公告
pubType = Field()          #公告类型
pubPerson = Field()        #公告人
client = Field()           #当事人
pubDate = Field()          #发布时间
pdfLink = Field()          #PDF下载网址
detailLink= Field()        #公告链接地址
collectTime = Field()      #采集时间
"""

url_queue = Queue.Queue()                    # URLs waiting to be crawled (request scheduling)
url_set = set()                              # every URL seen so far (de-duplication)
match_rule_suffix = r'\d+_\d+\.html'         # listing pages end in e.g. 0_0.html, 0_1.html
start_urls = [
    "http://rmfygg.court.gov.cn/psca/lgnot/bulletin/page/0_0.html",
    "http://rmfygg.court.gov.cn/psca/lgnot/bulletin/page/0_1.html"
]
base_url = "http://rmfygg.court.gov.cn"

mutex = threading.Lock()                     # serializes access to the shared URL queue

class CrawlSpider(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        while url_queue.qsize() > 0:
            if mutex.acquire(True):                         # lock before touching the shared queue
                try:
                    current_url = url_queue.get_nowait()    # take the next URL off the queue
                except Queue.Empty:                         # another thread emptied it first
                    mutex.release()
                    break
                mutex.release()
            follow_up_url_list = self.parse_html(current_url)
            for url in follow_up_url_list:                  # push newly found URLs into the queue and the set
                if url not in url_set:
                    url_set.add(url)
                    url_queue.put(url)

    def follow_up_url(self, url, css_soup):                 # collect the follow-up URLs to crawl
        follow_up_url_list = []
        extract_urls = css_soup.find_all('a')
        rule_match = '.+' + match_rule_suffix
        rule = re.compile(rule_match)
        for tag in extract_urls:
            match_url = rule.match(tag.get('href', ''))     # skip <a> tags without an href
            if match_url:
                specific_url = base_url + match_url.group()
                follow_up_url_list.append(specific_url)
        return follow_up_url_list

    def extract_data(self, url, css_soup):                  # extract the data we need from the page
        item = {}
        type_tag = css_soup.find_all('ul')
        announcement_type = None
        if url.split('/')[-1][0] == '0':                    # the leading digit of the page name picks the category label
            announcement_type = type_tag[0].find_all('li')[0].string
        if url.split('/')[-1][0] == '1':
            announcement_type = type_tag[0].find_all('li')[1].string
        contents = css_soup.find_all('tr')
        for row in contents[1:]:                            # skip the table header row
            cells = row.find_all('td')
            item["isCourtPub"] = announcement_type
            item["pubType"] = cells[0].string
            item["pubPerson"] = cells[1].string
            item["client"] = cells[2].string
            item["pubDate"] = cells[3].string
            item["pdfLink"] = base_url + cells[4].a['href']
            item["detailLink"] = base_url + cells[2].a['href']
            item["collectTime"] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            self.save(item)                                 # save each row as soon as it is parsed

    def parse_html(self, url):                              # fetch a page, extract its data, return follow-up links
        css_soup = BeautifulSoup(requests.get(url).text, 'html.parser')
        follow_up_url_list = self.follow_up_url(url, css_soup)
        self.extract_data(url, css_soup)
        return follow_up_url_list

    def save(self, item):                                   # append one JSON object per line
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        with codecs.open('courtannounce.json', 'a+', encoding='utf-8') as output:
            output.write(line)

def main():
    # seed the queue (request scheduling) and the set (de-duplication) with the start URLs
    for url in start_urls:
        url_set.add(url)
        url_queue.put(url)
    for i in range(10):                                     # start ten crawler threads, one second apart
        thread = CrawlSpider()
        thread.start()
        time.sleep(1)

if __name__ == "__main__":
    main()
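
Since save() appends one JSON object per line to courtannounce.json, the collected records can be loaded back later with a short helper like the one below. This is a minimal sketch, not part of the original script; load_items is a name chosen here for illustration:

# Read back the JSON-lines file written by save() above.
import json
import codecs

def load_items(path='courtannounce.json'):
    items = []
    with codecs.open(path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if line:                                        # skip blank lines
                items.append(json.loads(line))
    return items

Each element of the returned list is one announcement dict with the fields listed in the docstring at the top of the script.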

That is the basic idea behind crawling these announcements: the start URLs are first put into a queue (used to schedule requests) and into a set (used for de-duplication). During the crawl, each requested page is not only parsed for its data (extract_data); the follow-up links we care about are also matched, and those links are in turn pushed into the queue and the set.
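
Stripped of the site-specific parsing, that scheduling and de-duplication pattern fits in a few lines. The sketch below is a simplified, single-threaded restatement of it; crawl and parse_page are illustrative names, and parse_page stands in for whatever function fetches a URL and returns the follow-up links it found:

# Minimal single-threaded sketch of the queue + set pattern described above.
# parse_page is a placeholder: it should fetch a URL and return follow-up links.
import Queue

def crawl(start_urls, parse_page):
    url_queue = Queue.Queue()                # decides which URL is requested next
    url_set = set()                          # remembers every URL ever enqueued
    for url in start_urls:
        url_set.add(url)
        url_queue.put(url)
    while not url_queue.empty():
        current_url = url_queue.get()
        for url in parse_page(current_url):
            if url not in url_set:           # only enqueue URLs we have not seen before
                url_set.add(url)
                url_queue.put(url)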
Tags: crawler, python