
Python Crawler Learning, Day 17: Paying Back Yesterday's Debt~

2017-08-10 12:04
I'm paying back the time I owed yesterday by giving up my lunch break.

Exercise 1: Below are the functions that collect the current page's internal and external links.

By internal link I mean one that only works when the current URL is prepended to it, for example an href like "/index.html".

An external link can be used directly as a URL, for example "https://github.com/".

from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import random
import datetime

random.seed(datetime.datetime.now())

# Collect all internal links on the current page (test version)
def getInternalLinks(internalUrl):
    internalLINKS = []
    html = urlopen(internalUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    # Internal by my definition: hrefs that do not start with a scheme or "//"
    for link in bsObj.findAll("a", href=re.compile("^(?!(http://|https://|//)).")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in internalLINKS:
                internalLINKS.append(link.attrs['href'])
    return internalLINKS

# Collect all external links on the current page
def getExternalLinks(externalUrl):
    externalLINKS = []
    html = urlopen(externalUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    # External: full (or protocol-relative) URLs that can be used directly
    for link in bsObj.findAll("a", href=re.compile("(https?:|ftp:|file:)*//[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in externalLINKS:
                externalLINKS.append(link.attrs['href'])
    return externalLINKS

# links = getInternalLinks('https://www.bilibili.com/')
# for link in links:
#     print(link)

exLinks = getExternalLinks('https://www.bilibili.com/')
for exlink in exLinks:
    print(exlink)
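
As a side note, the same internal/external split can be done without the long regexes by comparing hosts. This is a minimal sketch of my own (not part of the exercise, and it uses "same host" rather than my prepend-the-URL definition above); urljoin and urlparse come from the standard library:

from urllib.parse import urlparse, urljoin

# Hypothetical helper (an assumption, not from the exercise):
# resolve the href against the page URL, then call it internal
# when it stays on the same host and external otherwise.
def classifyLink(pageUrl, href):
    absolute = urljoin(pageUrl, href)  # resolves relative hrefs like "/index.html"
    if urlparse(absolute).netloc == urlparse(pageUrl).netloc:
        return "internal", absolute
    return "external", absolute

# classifyLink('https://www.bilibili.com/', '/index.html')
# -> ('internal', 'https://www.bilibili.com/index.html')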


Exercise 2: The program below manages to fetch a random external link.

The code is still unstable, but the main functionality works.

from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import random
import datetime

random.seed(datetime.datetime.now())

# Collect all internal links on the current page (test version)
def getInternalLinks(internalUrl):
    internalLINKS = []
    html = urlopen(internalUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    for link in bsObj.findAll("a", href=re.compile("^(?!(http://|https://|//)).")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in internalLINKS:
                internalLINKS.append(link.attrs['href'])
    return internalLINKS

# Collect all external links on the current page
def getExternalLinks(externalUrl):
    externalLINKS = []
    html = urlopen(externalUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    for link in bsObj.findAll("a", href=re.compile("(https?:|ftp:|file:)*//[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in externalLINKS:
                externalLINKS.append(link.attrs['href'])
    return externalLINKS

# Pick a random external link; if the page has none, recurse into a random internal link
def getRandomExternalLink(url):
    internalLinks = getInternalLinks(url)
    externalLinks = getExternalLinks(url)
    externalLINK = None  # avoid a NameError when both lists are empty

    if len(externalLinks) > 0:
        externalLINK = externalLinks[random.randint(0, len(externalLinks)-1)]
    else:
        if len(internalLinks) > 0:
            internalLINK = internalLinks[random.randint(0, len(internalLinks)-1)]
            externalLINK = getRandomExternalLink(url + internalLINK)
    return externalLINK

# exLinks = getExternalLinks('https://www.bilibili.com/')
# for exlink in exLinks:
#     print(exlink)

exlink = getRandomExternalLink('https://www.bilibili.com/')
print(exlink)
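
Since the code is admittedly unstable, one low-effort safeguard (my own sketch, not part of the exercise; the name safeGetSoup is made up) is to wrap the network call so a refused request or a bad URL returns None instead of crashing:

from urllib.request import urlopen
from urllib.error import URLError, HTTPError
from bs4 import BeautifulSoup

# Hypothetical helper: fetch and parse a page, returning None
# when the server rejects us or the URL is malformed.
def safeGetSoup(url):
    try:
        html = urlopen(url, timeout=10)
        return BeautifulSoup(html, "html.parser")
    except (HTTPError, URLError, ValueError):
        return None

getInternalLinks/getExternalLinks could then start with soup = safeGetSoup(url) and return an empty list when it is None.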


That's it for the lunch session; there seem to be quite a few details left to think through.

Exercise 3: Hop through external links repeatedly (practice only, very unstable)

This program lets the crawler pick a random external link on the current page, visit it, and then repeat the process. It is very unstable: it only implements the happy path with no error handling, so it throws all kinds of errors when it hits a server that blocks it or a page without external links. Since this is just practice I didn't dwell on that; pointers from more experienced people on making it more robust are welcome!

from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import random
import datetime

random.seed(datetime.datetime.now())

# Collect all internal links on the current page (test version)
def getInternalLinks(internalUrl):
    internalLINKS = []
    html = urlopen(internalUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    for link in bsObj.findAll("a", href=re.compile("^(?!(http://|https://|//)).*html")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in internalLINKS:
                internalLINKS.append(link.attrs['href'])
    return internalLINKS

# Collect all external links on the current page
def getExternalLinks(externalUrl):
    externalLINKS = []
    html = urlopen(externalUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    for link in bsObj.findAll("a", href=re.compile("(https?:|ftp:|file:)//[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in externalLINKS:
                externalLINKS.append(link.attrs['href'])
    return externalLINKS

# Pick a random external link; if the page has none, recurse into a random internal link
def getRandomExternalLink(url):
    internalLinks = getInternalLinks(url)
    externalLinks = getExternalLinks(url)
    externalLINK = None

    if len(externalLinks) > 0:
        externalLINK = externalLinks[random.randint(0, len(externalLinks)-1)]
    else:
        if len(internalLinks) > 0:
            internalLINK = internalLinks[random.randint(0, len(internalLinks)-1)]
            externalLINK = getRandomExternalLink(url + internalLINK)
    return externalLINK

# Follow a random external link, print it, and repeat from the new page
def run(url):
    newLink = getRandomExternalLink(url)
    print(newLink)
    run(newLink)

run('https://baike.baidu.com/')
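
For what it's worth, the recursive run() will eventually hit Python's recursion limit (1000 frames by default) or crash when getRandomExternalLink returns None. A rough loop-based variant of my own (runLoop and the hop budget maxHops are assumptions, not part of the exercise) stops cleanly instead:

# Hypothetical rewrite of run() as a bounded loop
def runLoop(url, maxHops=20):
    for _ in range(maxHops):
        newLink = getRandomExternalLink(url)
        if newLink is None:  # dead end: no external link reachable
            print("no external link found, stopping")
            break
        print(newLink)
        url = newLink

runLoop('https://baike.baidu.com/')

Note this still has no error handling around the network calls, matching the original; it only removes the unbounded recursion and the None crash.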


I've decided to stop this big exercise here for now. Error handling still feels a bit beyond me, so I need to read more good code first.
Tags: python