python 代码刷取CSDN阅读量
2018-03-23 22:50
120 查看
import requests
from bs4 import BeautifulSoup
import multiprocessing
import time


def getProxyIp():
    """Scrape pages 1-49 of xicidaili.com's free-proxy list.

    Returns:
        list[dict]: one dict per table row, shaped like requests' ``proxies``
        argument, e.g. ``{'http': 'http://1.2.3.4:8080'}`` (and/or an
        ``'https'`` key).  A row whose type column contains neither scheme
        yields an empty dict.
    """
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/64.0.3282.140 Safari/537.36'
    }
    proxy = []
    for page in range(1, 50):
        print(page)
        # BUG FIX: the original passed proxies='61.135.217.7' (a bare
        # string); requests requires a dict mapping scheme -> proxy URL,
        # so the call could never succeed.  Fetch directly instead.
        r = requests.get('http://www.xicidaili.com/nt/{0}'.format(page),
                         headers=header)
        soup = BeautifulSoup(r.text, 'lxml')
        table = soup.find('table', attrs={'id': 'ip_list'})
        # First <tr> is the header row; skip it.
        for item in table.find_all('tr')[1:]:
            tds = item.find_all('td')
            print(tds[1].get_text())
            temp_dict = {}
            # Column layout: tds[1]=IP, tds[2]=port, tds[5]=type (HTTP/HTTPS).
            kind = tds[5].get_text().lower()
            # NOTE: 'http' in 'https' is True, so an HTTPS row intentionally
            # gets both keys (HTTPS proxies generally tunnel HTTP too).
            if 'http' in kind:
                temp_dict['http'] = "http://{0}:{1}".format(
                    tds[1].get_text(), tds[2].get_text())
            if 'https' in kind:
                temp_dict['https'] = "https://{0}:{1}".format(
                    tds[1].get_text(), tds[2].get_text())
            proxy.append(temp_dict)
    return proxy


def brash(proxy_dict):
    """Hit the target CSDN article once through ``proxy_dict``.

    Args:
        proxy_dict (dict): requests-style proxies mapping, e.g.
            ``{'http': 'http://1.2.3.4:8080'}``.

    Prints "successful" when the request completes within 10 s and
    "failed" on any exception (dead proxy, timeout, ...), then sleeps
    briefly to pace the workers.
    """
    header = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Ubuntu Chromium/44.0.2403.89 '
                      'Chrome/44.0.2403.89 '
                      'Safari/537.36',
        # Pretend we arrived from a Baidu search so the hit looks organic.
        'Referer': 'https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&tn=baidu&wd=csdn%20%E6%80%9D%E6%83%B3%E7%9A%84%E9%AB%98%E5%BA%A6%20csdnzouqi&oq=csdn%20%E6%80%9D%E6%83%B3%E7%9A%84%E9%AB%98%E5%BA%A6&rsv_pq=fe7241c2000121eb&rsv_t=0dfaTIzsy%2BB%2Bh4tkKd6GtRbwj3Cp5KVva8QYLdRbzIz1CCeC1tOLcNDWcO8&rqlang=cn&rsv_enter=1&rsv_sug3=11&rsv_sug2=0&inputT=3473&rsv_sug4=3753',
    }
    try:
        requests.get(
            "https://blog.csdn.net/web_9705/article/details/79673660",
            headers=header, proxies=proxy_dict, timeout=10)
    except requests.RequestException:
        # Dead/slow proxies are expected; report and move on.
        print("failed")
    else:
        print("successful")
    time.sleep(0.5)


if __name__ == '__main__':
    # Number of times to re-scrape the proxy list, read from stdin.
    final = int(input())
    for _ in range(final):
        proxies = getProxyIp()
        # Reuse each batch 5 times so proxies that failed once (e.g. a
        # transient timeout) still get more chances before being discarded.
        for _ in range(5):
            pool = multiprocessing.Pool(processes=32)
            results = [pool.apply_async(brash, (p,)) for p in proxies]
            for res in results:
                res.get()  # propagate/await each worker before reuse
            pool.close()
            pool.join()
        time.sleep(20)
相关文章推荐
- [python]进阶学习之阅读代码
- 教你阅读 Python 开源项目代码
- Python--阅读优秀的代码
- python中BaseHTTPServer.py代码阅读分析
- 测试Python代码在CSDN中语法高亮的方法
- Python模拟登录csdn代码
- Source insight 阅读python 代码 配置
- 教你阅读Python开源项目代码
- 教你阅读Python开源项目代码
- 《笨办法学 Python》—— 阅读代码
- Python登录并获取CSDN博客所有文章列表代码实例
- 我的第一篇CSDN博客文章,Python代码实现矩阵翻转
- 笨方法学习Python-习题38: 阅读代码
- 阅读代码—整理学习python数据处理1
- Python 爬虫实例(10)—— 四行代码实现刷 博客园 阅读数量
- SourceInsight支持Python代码阅读
- python源码分析阅读理解chapter01~05 (纯阅读书籍,代码未看)
- python爬虫代码-CSDN博客下载
- python爬虫之模拟登陆csdn的实例代码
- 用source insight阅读python代码