用python脚本遍历下载网站文件夹
2015-08-18 05:19
489 查看
因为需要,希望能下载http://www.xxx.us/上的所有xls/txt/pdf/rtf类型的资料,并按照相同路径保存文档。当然可以直接网上下载一个批量下载工具来搞定,但对于有DIY癖好的人来讲,当然自己写喽。
#coding=utf-8
from bs4 import BeautifulSoup
import os, sys, urllib2,time,random
# Mirror everything into a 'TempDownload' folder under the current
# working directory; reuse it if a previous run already created it.
path = os.getcwd()
root_path = os.path.join(path, u'TempDownload')
if not os.path.isdir(root_path):
    os.mkdir(root_path)
def download_loop(url, dl_path):
print url
content = urllib2.urlopen(url)
soup = BeautifulSoup(content)
link = soup.find_all('a')
for download in link:
flink = download.get('href')
if flink.find(".pdf",len(flink)-4) <> -1 or flink.find(".xls",len(flink)-4) <> -1 or flink.find(".txt",len(flink)-4) <> -1 or flink.find(".xlsx",len(flink)-5) <> -1 or flink.find(".rtf",len(flink)-4) <> -1:
print '+'+flink
content2 = urllib2.urlopen(url+flink).read()
with open(dl_path+'/'+flink,'wb') as code:
code.write(content2)
temp = download.get_text()
if flink.find("/",len(flink)-1) <> -1 and temp <> u'Parent Directory':
directory=str(flink[:-1])
file_path= os.path.join(dl_path,directory.replace("%20"," "))
if not os.path.isdir(file_path):
os.mkdir(file_path)
print '-'+flink + ' || path is:' + str(dl_path)
new_url=url+flink
download_loop(new_url, file_path)
# Entry point: mirror the whole site into root_path, then pause so a
# double-clicked console window stays open (Python 2 raw_input).
## print u'download completed'
download_loop('http://www.xxx.us/', root_path)
print "~~~~~~~~~~~~~~~~~~~~~~~~~~END~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
raw_input("Press <Enter> To Quit!")
#coding=utf-8
from bs4 import BeautifulSoup
import os, sys, urllib2,time,random
path = os.getcwd()
root_path = os.path.join(path, u'TempDownload')
# Create the download root on first run; later runs simply reuse it.
if not os.path.isdir(root_path):
    os.mkdir(root_path)
def download_loop(url, dl_path):
print url
content = urllib2.urlopen(url)
soup = BeautifulSoup(content)
link = soup.find_all('a')
for download in link:
flink = download.get('href')
if flink.find(".pdf",len(flink)-4) <> -1 or flink.find(".xls",len(flink)-4) <> -1 or flink.find(".txt",len(flink)-4) <> -1 or flink.find(".xlsx",len(flink)-5) <> -1 or flink.find(".rtf",len(flink)-4) <> -1:
print '+'+flink
content2 = urllib2.urlopen(url+flink).read()
with open(dl_path+'/'+flink,'wb') as code:
code.write(content2)
temp = download.get_text()
if flink.find("/",len(flink)-1) <> -1 and temp <> u'Parent Directory':
directory=str(flink[:-1])
file_path= os.path.join(dl_path,directory.replace("%20"," "))
if not os.path.isdir(file_path):
os.mkdir(file_path)
print '-'+flink + ' || path is:' + str(dl_path)
new_url=url+flink
download_loop(new_url, file_path)
# Kick off the recursive mirror at the site root, then wait for a key
# press so the console output can be read before the window closes.
## print u'download completed'
download_loop('http://www.xxx.us/', root_path)
print "~~~~~~~~~~~~~~~~~~~~~~~~~~END~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
raw_input("Press <Enter> To Quit!")
相关文章推荐
- Python动态类型的学习---引用的理解
- Python3写爬虫(四)多线程实现数据爬取
- 垃圾邮件过滤器 python简单实现
- 下载并遍历 names.txt 文件,输出长度最长的回文人名。
- install and upgrade scrapy
- Scrapy的架构介绍
- Centos6 编译安装Python
- 使用Python生成Excel格式的图片
- 让Python文件也可以当bat文件运行
- [Python]推算数独
- Python中zip()函数用法举例
- Python中map()函数浅析
- Python将excel导入到mysql中
- Python在CAM软件Genesis2000中的应用
- 使用Shiboken为C++和Qt库创建Python绑定
- FREEBASIC 编译可被python调用的dll函数示例
- Python 七步捉虫法