您的位置:首页 > 编程语言 > Python开发

用python脚本遍历下载网站文件夹

2015-08-18 05:19 489 查看
因为需要,希望能下载http://www.xxx.us/上的所有xls/txt/pdf/rtf类型的资料,并按照相同的目录路径保存文档。当然可以直接从网上下载一个批量下载工具来搞定,但对于有DIY癖好的人来讲,当然是自己写喽。

#coding=utf-8

from bs4 import BeautifulSoup

import os, sys, urllib2,time,random

 

path = os.getcwd()                     

root_path = os.path.join(path,u'TempDownload')

if not os.path.isdir(root_path):

    os.mkdir(root_path)

 

 

def download_loop(url, dl_path):

    print url

    content = urllib2.urlopen(url)

    soup = BeautifulSoup(content)

    link = soup.find_all('a')

    for download in link:

        flink = download.get('href')

        if flink.find(".pdf",len(flink)-4) <> -1 or flink.find(".xls",len(flink)-4) <> -1 or flink.find(".txt",len(flink)-4) <> -1 or flink.find(".xlsx",len(flink)-5) <> -1 or flink.find(".rtf",len(flink)-4) <> -1:

          print '+'+flink

          content2 = urllib2.urlopen(url+flink).read()

          with open(dl_path+'/'+flink,'wb') as code:

             code.write(content2)

        temp = download.get_text()

        if flink.find("/",len(flink)-1) <> -1 and temp <> u'Parent Directory':

          directory=str(flink[:-1])

          file_path= os.path.join(dl_path,directory.replace("%20"," "))

          if not os.path.isdir(file_path):

            os.mkdir(file_path)

          print '-'+flink + ' || path is:' + str(dl_path)

          new_url=url+flink

          download_loop(new_url, file_path)

##    print u'download completed'

     

download_loop('http://www.xxx.us/', root_path)

print "~~~~~~~~~~~~~~~~~~~~~~~~~~END~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"

raw_input("Press <Enter> To Quit!")
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签:  python 下载工具 脚本