
Writing a web crawler program in Python

2012-04-06 11:52
    Now that I'm working, I need to crawl some specific resources for analysis. There is no need for high-speed, massive-volume crawling, so the idea is simply to write a small crawler in Python and then write templates for targeted parsing of the pages. That is enough to solve the problem, since our requirements on time and data volume are modest.
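
By "templates for targeted parsing" I have in mind something like a per-site table of patterns that pulls out only the fields we care about. A rough sketch of the idea (the site key, field names and patterns below are made-up placeholders for illustration, not part of any real template yet):

# -*- coding: utf-8 -*-
# Rough sketch of the "template" idea: one dict of regexes per site,
# applied to a downloaded page to extract the fields we want.
# Site key, field names and patterns are placeholders for illustration.
import re

templates = {
    'example-site': {
        'title': r'<title>(.*?)</title>',
        'links': r'href="(http://[^"]+)"',
    },
}

def parse_with_template(siteKey, page):
    result = {}
    for field, pattern in templates[siteKey].items():
        result[field] = re.findall(pattern, page, re.S)
    return result
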
    I searched the web for some documents on writing crawlers in Python and downloaded them to try out and see how they work. Although this article is marked as original, parts of it draw on other people's material; please bear with me. My intent is to take the best of others' work, then recombine and refactor it into the system I need.
    First, I downloaded a crawler written by someone else to see how it performs. The source is here: http://blog.csdn.net/cashey1991/article/details/6262704
Test.py

# -*- coding: utf-8 -*-
import WebCrawler

url = raw_input('Entry URL (e.g. http://www.baidu.com): \n')
thNumber = int(raw_input('Number of threads: '))    # must be cast to int; the missing conversion was a bug
Maxdepth = int(raw_input('Maximum search depth: '))

wc = WebCrawler.WebCrawler(thNumber, Maxdepth)
wc.Craw(url)

WebCrawler.py

# -*- coding: utf-8 -*-
import threading
import GetUrl
import urllib

g_mutex = threading.Lock()
g_pages = []      # page contents appended by the worker threads after downloading
g_dledUrl = []    # every URL that has already been downloaded
g_toDlUrl = []    # URLs to download in the current round
g_failedUrl = []  # URLs that failed to download
g_totalcount = 0  # number of pages downloaded so far

class WebCrawler:
    def __init__(self, threadNumber, Maxdepth):
        self.threadNumber = threadNumber
        self.threadPool = []
        self.Maxdepth = Maxdepth
        self.logfile = file('#log.txt', 'w')                                   ##

    def download(self, url, fileName):
        Cth = CrawlerThread(url, fileName)
        self.threadPool.append(Cth)
        Cth.start()

    def downloadAll(self):
        global g_toDlUrl
        global g_totalcount
        i = 0
        while i < len(g_toDlUrl):
            j = 0
            while j < self.threadNumber and i + j < len(g_toDlUrl):
                g_totalcount += 1    # one more page scheduled for download
                self.download(g_toDlUrl[i + j], str(g_totalcount) + '.htm')
                print 'Thread started:', i + j, '--File number = ', g_totalcount
                j += 1
            i += j
            for th in self.threadPool:
                th.join(30)     # wait for this batch of threads to finish, 30 s timeout each
            self.threadPool = []    # clear the thread pool before the next batch
        g_toDlUrl = []    # the whole queue has been handed out; clear it

    def updateToDl(self):
        global g_toDlUrl
        global g_dledUrl
        global g_pages
        newUrlList = []
        for s in g_pages:
            newUrlList += GetUrl.GetUrl(s)   # extract the URLs from each downloaded page (see GetUrl.py)
        g_toDlUrl = list(set(newUrlList) - set(g_dledUrl))    # drop URLs already downloaded (this line originally reported an 'unhashable' error)
        g_pages = []    # these pages have been parsed; don't parse them again at the next depth

    def Craw(self, entryUrl):    # crawl level by level; stops when g_toDlUrl is empty or Maxdepth is reached
        g_toDlUrl.append(entryUrl)
        self.logfile.write('>>>Entry:\n')                                      ##
        self.logfile.write(entryUrl)                                           ##
        depth = 0
        while len(g_toDlUrl) != 0 and depth <= self.Maxdepth:
            depth += 1
            print 'Searching depth ', depth, '...\n\n'
            self.downloadAll()
            self.updateToDl()
            content = '\n>>>Depth ' + str(depth) + ':\n'                       ##
            self.logfile.write(content)                                        ##
            i = 0                                                              ##
            while i < len(g_toDlUrl):                                          ##
                content = str(g_totalcount + i + 1) + '->' + g_toDlUrl[i] + '\n'   ##
                self.logfile.write(content)                                    ##
                i += 1                                                         ##

class CrawlerThread(threading.Thread):
    def __init__(self, url, fileName):
        threading.Thread.__init__(self)
        self.url = url    # the URL this thread downloads
        self.fileName = fileName

    def run(self):    # thread body --> download one HTML page and save it to disk
        global g_mutex
        global g_failedUrl
        global g_dledUrl
        try:
            f = urllib.urlopen(self.url)
            s = f.read()
            fout = file(self.fileName, 'w')
            fout.write(s)
            fout.close()
        except:
            g_mutex.acquire()    # acquire the lock
            g_dledUrl.append(self.url)
            g_failedUrl.append(self.url)
            g_mutex.release()    # release the lock
            print 'Failed downloading and saving', self.url
            return None    # remember to return here!

        g_mutex.acquire()    # acquire the lock
        g_pages.append(s)
        g_dledUrl.append(self.url)
        g_mutex.release()    # release the lock
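
One caveat about CrawlerThread: urllib.urlopen() is called without any timeout, so a stalled connection can keep a thread alive well past the 30-second join() in downloadAll(). A minimal workaround (my own addition, not part of the downloaded code) is to set a process-wide socket timeout before starting the crawl, e.g. in Test.py:

# -*- coding: utf-8 -*-
# Sketch only: a global socket timeout makes a stalled urllib.urlopen()
# raise an exception instead of blocking a CrawlerThread indefinitely.
# The 10-second value is an arbitrary example.
import socket
socket.setdefaulttimeout(10)

import WebCrawler
wc = WebCrawler.WebCrawler(5, 2)   # example values: 5 threads, depth 2
wc.Craw('http://www.baidu.com')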


GetUrl.py
# -*- coding: cp936 -*-
urlSep = ['<', '>', '//', '(', ')', r'"', r"'", ' ', '\t', '\n']
urlTag = ['http://']

def is_sep(ch):
    for c in urlSep:
        if c == ch:
            return True
    return False

def find_first_sep(i, s):
    while i < len(s):
        if is_sep(s[i]):
            return i
        i += 1
    return len(s)

def GetUrl(strPage):
    rtList = []
    for tag in urlTag:
        i = 0
        i = strPage.find(tag, i, len(strPage))
        while i != -1:
            begin = i
            end = find_first_sep(begin + len(tag), strPage)
            rtList.append(strPage[begin:end])
            i = strPage.find(tag, end, len(strPage))

    return rtList
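
GetUrl only looks for the literal prefix 'http://' and cuts the match at the first separator character, so 'https://' links and relative links are missed (and the '//' entry in urlSep can never match, because is_sep() compares single characters). If a slightly more robust extractor is needed later, a regular-expression version along these lines could be swapped in (a sketch under those assumptions, not tested against the original pages):

# -*- coding: utf-8 -*-
# Sketch of a regex-based replacement for GetUrl.GetUrl().
# Also picks up https:// links; the character class mirrors urlSep.
import re

_url_re = re.compile(r'''https?://[^<>()"'\s]+''')

def GetUrl(strPage):
    # findall() returns every non-overlapping match as a plain string
    return _url_re.findall(strPage)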


 