您的位置:首页 > 数据库

基于tornado的web探测系统(自带旁注、端口扫描、whois、sql注入监测)

2012-07-06 21:14 519 查看
http.py
import tornado.ioloop,os
import tornado.web
import unit,socket

# NOTE(review): hard-coded credentials committed to source; they appear
# unused anywhere in this file — confirm against the rest of the project
# before removing. Storing passwords in source code is unsafe regardless.
user='yk'
passwd='123'

class MainHandler(tornado.web.RequestHandler):
    """Landing page: shows the target-input form, then remembers the
    submitted target in a signed cookie and forwards to the scan page."""

    # Static HTML for the landing form (posts back to this same URL).
    _FORM = '''
<form id="form1" name="form1" method="post" action="">
<table width="276" border="0" align="center">
<tr>
<th width="266" scope="col">目标: <input name="ip" type="text" value="www.google.com" /></th>
</tr>
<tr>
<th scope="row">

<input type="submit" value="提交" />
</th>
</tr>
</table>
</form>

'''

    def get(self):
        """Render the target-input form."""
        self.write(self._FORM)

    def post(self):
        """Store the submitted target in the signed "tar" cookie and
        redirect to the scan handler."""
        target = self.get_argument("ip")
        self.set_secure_cookie("tar", target)
        self.redirect("/post")

class ScanHandler(tornado.web.RequestHandler):
    """Run the probe selected by ?act=... against the target stored in the
    signed "tar" cookie and render the matching template."""

    def get(self):
        raw = self.get_secure_cookie("tar")
        if not raw:
            # Missing/invalid cookie: the original crashed on None.decode();
            # send the user back to the form instead.
            self.redirect("/")
            return
        tar = raw.decode().strip()
        ip = socket.gethostbyname(tar)
        # Default of None replaces the original's exception-driven flow
        # (MissingArgumentError -> bare except), which also hid real errors.
        act = self.get_argument("act", None)
        try:
            if act == 'port':
                port = unit.scanport(tar)
                self.render("dict.html", name='端口', value='结果', tar=tar, ip=ip, items=port)
            elif act == 'http':
                head = unit.gethead(tar)
                self.render("dict.html", name='头', value='值', tar=tar, ip=ip, items=head)
            elif act == 'whois':
                whoisdb = {'内容': unit.whois(tar)}
                self.render("dict.html", name='名称', value='whois内容', tar=tar, ip=ip, items=whoisdb)
            elif act == 'trac':
                tra = unit.trac(tar)
                self.render("list.html", name='途径IP', ip=ip, tar=tar, items=tra)
            elif act == 'sql':
                sql = unit.getsql(tar)
                self.render("list.html", name='URL', ip=ip, tar=tar, items=sql)
            elif act == 'syyl':
                syyl = unit.syget(tar)
                self.render("list.html", name='首页内容', ip=ip, tar=tar, items=syyl)
            elif act is None:
                # First visit after submitting the form: show the
                # reverse-IP ("旁注") listing, as the original's except
                # branch did when no act argument was present.
                pzs = unit.getrip(tar)
                self.render("pz.html", name='域名', value='介绍', ip=ip, tar=tar, items=pzs)
            else:
                # Unknown act value — kept from the original.
                self.render("pz.html", name='域名', value='介绍', ip=ip, tar=tar)
        except Exception:
            # Best-effort fallback preserved from the original: any probe
            # failure degrades to the reverse-IP listing rather than a 500.
            pzs = unit.getrip(tar)
            self.render("pz.html", name='域名', value='介绍', ip=ip, tar=tar, items=pzs)

# Application-wide Tornado settings.
settings = {
    "static_path": os.path.join(os.path.dirname(__file__), "static"),
    # NOTE(review): the cookie-signing key is committed to source — anyone
    # who reads it can forge the "tar" secure cookie. Rotate/externalize it.
    "cookie_secret": "61oETzKXQAGaYdkL5gEmGdJJFuYh7EQnp2XdTP1o/Vo=",
    #"login_url": "/login",
    #"xsrf_cookies": True,
}

application = tornado.web.Application(
    [
        (r"/", MainHandler),
        (r"/post", ScanHandler),
        # Catch-all: everything else is served from the static directory.
        (r"/(.*)", tornado.web.StaticFileHandler, dict(path=settings['static_path'])),
    ],
    **settings)

if __name__ == "__main__":
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()

unit.py

import sys,os,socket,threading
from bs4 import BeautifulSoup
import httplib2
import urllib.parse as up
import urllib.request as ur

#--------------------------------胖猪代码----------------------------
# Bing search URL prefix used for reverse-IP ("旁注") lookups: the ip:
# operator lists sites hosted on a given address; count=100 asks Bing for
# up to 100 results per page.
url=r'http://cn.bing.com/search?count=100&q=ip:'
# Browser-impersonating headers sent with every outgoing HTTP request.
# NOTE(review): the User-Agent/Cookie values are hard-coded snapshots,
# presumably captured from a real browsing session — confirm Bing still
# accepts them before relying on getrip().
httphead={'User-Agent':'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; User-agent: Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; http://bsalsa.com) ; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152)',
'Cookie':'SRCHUID=V=2&GUID=79E9F92F75B54E60B4588D130264EFD4; MUID=0A81369FC80C6E532B69359EC9026E42; SRCHD=SM=1&MS=2196069&D=2160426&AF=NOFORM; SRCHUSR=AUTOREDIR=0&GEOVAR=&DOB=20120209; _SS=SID=C8C39DCC3EA342E2859C472E445A1BEC; _UR=D=0; RMS=F=O&A=Q; SCRHDN=ASD=0&DURL=#',
'Referer':'http://cn.bing.com/'}

def getrip(tar):
    """Reverse-IP lookup via Bing: return {domain: link-title} for sites
    that share the target host's IP address.

    tar -- hostname; surrounding whitespace is tolerated.
    """
    ip = socket.gethostbyname(tar.strip())
    client = httplib2.Http()
    # httplib2 returns (response, body); only the body is parsed here.
    res, cont = client.request(url + ip, headers=httphead)
    # Explicit parser: the original's BeautifulSoup(cont) picks whatever
    # parser happens to be installed (and warns), making results
    # environment-dependent.
    soup = BeautifulSoup(cont, 'html.parser')
    dit = {}
    # Bing result entries carry class "sb_tlst"; map netloc -> anchor text.
    for entry in soup.findAll('div', attrs={'class': 'sb_tlst'}):
        dit[up.urlparse(entry.a['href']).netloc] = entry.a.text
    return dit

'''
for i in dit:
try:
print(i,dit[i])
except:
pass
'''
#---------------------------端口扫描代码-------------------------------

# TCP ports probed by scanport(): common service and management ports.
portlist=[21,22,23,25,53,80,81,110,111,139,135,443,445,1723,2012,3389,1433,3306,1521,8080,3128,5900]
# Module-level scratch state shared between scanport() and its con()
# worker threads: op collects ports that accepted a connection, tlist
# holds the worker threads. Both are reset by scanport() after each scan.
op=[]
tlist=[]

def con(tar, port):
    """Worker for scanport(): attempt a TCP connect to (tar, port) and
    record the port in the shared module-level `op` list on success.

    The original declared `global oklist` — a name that exists nowhere in
    the module (apparently a typo for `op`); removed. No global statement
    is needed since `op` is only mutated, never rebound, here.
    """
    s = socket.socket()
    s.settimeout(3)  # don't let one filtered port stall the whole scan
    try:
        s.connect((tar, port))
        op.append(port)  # list.append is safe to call from worker threads
    except OSError:
        # Refused, filtered, or timed out — simply not recorded as open.
        pass
    finally:
        s.close()

def scanport(tar):
    """Concurrently probe every port in `portlist` on host `tar`.

    Returns {port-as-str: '开放'} for each port that accepted a TCP
    connection. Uses the module-level `op`/`tlist` scratch lists (cleared
    after each call), so concurrent invocations would interleave results.
    """
    global tlist
    global op
    for port in portlist:
        tlist.append(threading.Thread(target=con, args=(tar, port)))
    for worker in tlist:
        worker.start()
    for worker in tlist:
        worker.join()
    tlist = []
    # The original iterated enumerate(op) and discarded the index; a dict
    # comprehension states the intent directly.
    oport = {str(port): '开放' for port in op}
    op = []
    return oport

#tar=input('target :\n').strip()

#----------------------------获得主机头信息-------------------------------

def gethead(tar):
    """Fetch http://<tar> and return the HTTP response (header) object;
    the body is discarded."""
    client = httplib2.Http()
    response, _body = client.request(r'http://' + tar, headers=httphead)
    return response

#---------------------------------whois 信息------------------------------

def wlst():
    """Return (suffix, whois-server) pairs used by whois().

    Each suffix is the last three characters of a TLD (e.g. "com", ".uk",
    "ame" for .name, "eum" for .museum) and is matched with str.endswith();
    order matters, as the first matching entry wins.
    """
    return [
        ("com", "whois.verisign-grs.com"),
        ("net", "whois.verisign-grs.com"),
        ("org", "whois.pir.org"),
        ("nfo", "whois.afilias.info"),
        ("biz", "whois.biz"),
        (".cc", "whois.nic.cc"),
        ("edu", "whois.educause.net"),
        ("mil", "whois.nic.mil"),
        ("gov", "whois.nic.gov"),
        (".uk", "whois.nic.uk"),
        (".us", "whois.nic.us"),
        ("ame", "whois.nic.name"),
        ("eum", "whois.museum"),
        (".su", "whois.ripn.net"),
        (".ru", "whois.nic.ru"),
        ("int", "whois.iana.org"),
        (".ws", "whois.worldsite.ws"),
        (".kr", "whois.krnic.net"),
        (".jp", "whois.nic.ad.jp"),
        (".it", "whois.nic.it"),
        (".de", "whois.denic.de"),
        (".fr", "whois.nic.fr"),
        (".ca", "whois.cira.ca"),
        (".cn", "whois.cnnic.net.cn"),
        (".tw", "whois.twnic.net.tw"),
        (".hk", "whois.hkdnr.net.hk"),
        (".au", "whois.aunic.net"),
        (".ac", "whois.nic.ac"),
    ]

def ipwhois(tar, ipws='whois.iana.org'):
    """Query a WHOIS server (TCP/43) for the IP address of host `tar` and
    return the decoded response text.

    Fixes vs. original: the socket is now always closed (it leaked), the
    reply is read until EOF (a single recv() could truncate a multi-packet
    response), and the debug prints are gone.
    """
    ip = socket.gethostbyname(tar)
    s = socket.socket()
    try:
        s.connect((ipws, 43))
        s.send(ip.encode() + b'\r\n')
        chunks = []
        while True:
            data = s.recv(4096)
            if not data:  # server closed the connection: reply complete
                break
            chunks.append(data)
    finally:
        s.close()
    return b''.join(chunks).decode()

def whois(tar):
    """Look up WHOIS text for domain `tar`.

    Sub-domains are reduced to their registrable part (one leading label
    stripped). Returns the decoded response for the first matching TLD
    suffix from wlst(), or None when no suffix matches (unchanged from
    the original).
    """
    if len(tar.split('.')) > 2:
        tar = tar.split('.', 1)[1]
    for suffix, server in wlst():
        if not tar.endswith(suffix):
            continue
        s = socket.socket()
        try:
            s.connect((server, 43))
            s.send(tar.encode() + b'\r\n')
            # BUG FIX: the original issued two consecutive recv() calls and
            # kept only the second buffer, silently discarding the first
            # chunk of the reply. Read until the server closes instead; the
            # socket is now also closed (it leaked before).
            chunks = []
            while True:
                data = s.recv(4096)
                if not data:
                    break
                chunks.append(data)
        finally:
            s.close()
        return b''.join(chunks).decode('utf8', 'ignore')

#------------------------------首页Sql--------------------------------

def getsql(tar):
    """Collect candidate SQL-injection URLs from the target's front page:
    on-site links that carry a query string ('?')."""
    client = httplib2.Http()
    # Parent-domain fragment used to keep only on-site links.
    q = tar.split('.', 1)[-1]
    res, cont = client.request(r'http://' + tar, headers=httphead)
    soup = BeautifulSoup(cont, 'html.parser')
    found = []
    for anchor in soup.findAll('a'):
        href = anchor.get('href')
        # BUG FIX: the original reused the previous anchor's href when one
        # was missing — and raised NameError if the very first anchor had
        # none. Skip such anchors instead.
        if not isinstance(href, str):
            continue
        if not href.startswith('http'):
            # BUG FIX: the original replaced relative links with the bare
            # homepage URL, losing the path and query, so relative links
            # could never contain '?' and were always dropped. Resolve them
            # against the site root instead.
            href = up.urljoin(r'http://' + tar + r'/', href)
        if q not in href:
            continue
        if '?' in href:
            found.append(href)
    return found

#-------------------------------tracert-----------------------
def trac(tar):
    """Run Windows tracert against `tar` and return the hop IPs (the last
    column of each 7-field output line).

    SECURITY FIX: `tar` is user input and the original concatenated it
    into an os.popen() shell string, allowing command injection (e.g.
    "host & evil"). subprocess.run with an argument list never invokes a
    shell.
    """
    import subprocess
    proc = subprocess.run(
        ['tracert', '-d', '-w', '2', '-h', '20', tar],
        stdout=subprocess.PIPE, stderr=subprocess.DEVNULL,
        universal_newlines=True)
    hops = []
    for line in proc.stdout.splitlines():
        fields = line.split()
        # A completed hop line has 7 whitespace-separated fields, the last
        # being the responding address.
        if len(fields) == 7:
            hops.append(fields[-1])
    return hops

#----------------------------首页预览---------------------------
def syget(tar):
    """Fetch http://<tar> and return its <body> text wrapped in a
    one-element list (the templates iterate over `items`)."""
    client = httplib2.Http()
    res, cont = client.request(r'http://' + tar, headers=httphead)
    # Explicit parser: BeautifulSoup(cont) without one picks whatever is
    # installed (and warns), making output environment-dependent.
    text = BeautifulSoup(cont, 'html.parser').body.text
    return [text]


list.html

<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>测试</title>
</head>
<body>
<!-- Target re-entry form: posts a new target back to the index handler. -->
<form method="post" action="/" align="center">
<input name="ip" type="text" /><input type="submit" value="提交" />
</form>
<br />
<div align="center">目标为:{{ tar }}({{ip}})</div><br />
<!-- Probe menu: each link re-requests /post with a different act value.
     Tags lowercased/self-closed to be valid under the declared XHTML DTD. -->
<table width="100%" border="1" align="center">
<tr>
<th scope="col"><a href="?act=pz">旁注信息</a></th>
<th scope="col"><a href="?act=port">端口信息</a></th>
<th scope="col"><a href="?act=http">HTTP头信息</a></th>
<th scope="col"><a href="?act=whois">Whois信息</a></th>
<th scope="col"><a href="?act=trac">路由信息</a></th>
<th scope="col"><a href="?act=syyl">首页预览</a></th>
<th scope="col"><a href="?act=sql">首页SQL注入漏洞</a></th>
</tr>
</table>
<br />
<br />
<table width="93%" border="1" align="center" cellpadding="1" cellspacing="0">
<tr>
<th width="50%" scope="col">{{ name }}</th>
</tr>
{% for item in items %}
<!-- Result row. Fixed: the original had a stray </th> and never closed
     the <td>, leaving the link outside any cell. -->
<tr><td align="center"><a href="{{ escape(item) }}">{{ escape(item) }}</a></td></tr>
{% end %}
</table>
</body>
</html>


其他的几个模板就不提供了,一样的
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: