Web hidden-directory scanning: first you need to find a reliable wordlist yourself, place it in the script's root directory, and name it dict.log, with one path name per line.
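For example, dict.log might contain entries like these (hypothetical paths, shown purely to illustrate the one-path-per-line format):

    admin/
    login.php
    backup.zip
    phpinfo.php
    robots.txt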
import requests
import threading
import argparse
from queue import Queue

head = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'}

class DirScan(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self._queue = queue

    def run(self):
        # Keep pulling candidate URLs off the shared queue until it is empty.
        while not self._queue.empty():
            url = self._queue.get()
            try:
                req = requests.get(url=url, headers=head, timeout=5)
                if req.status_code == 200:
                    print("[+] " + url)
                    # Append each hit to result.html as a clickable link.
                    with open("result.html", "a+") as wr:
                        wr.write('<a href="' + url + '" target="_blank">' + url + '</a><br/>')
            except Exception:
                pass

def StartThread(url, count):
    queue = Queue()
    threads = []
    thread_count = int(count)
    # Read the wordlist: one path per line, joined onto the base URL
    # (slashes are normalized so it works with or without a trailing "/").
    with open("dict.log", "r", encoding="utf-8") as fp:
        for item in fp:
            queue.put(url.rstrip("/") + "/" + item.strip().lstrip("/"))
    for item in range(thread_count):
        threads.append(DirScan(queue))
    for t in threads:
        t.start()
    for t in threads:
        t.join()

def Banner():
    print(" _ ____ _ _ ")
    print(" | | _ _/ ___|| |__ __ _ _ __| | __")
    print(" | | | | | \___ \| '_ \ / _` | '__| |/ /")
    print(" | |__| |_| |___) | | | | (_| | | | < ")
    print(" |_____\__, |____/|_| |_|\__,_|_| |_|\_\\")
    print(" |___/ \n")
    print("E-Mail: me@lyshark.com")

if __name__ == "__main__":
    # Usage: main.py -u http://www.lyshark.com -t 10
    Banner()
    parser = argparse.ArgumentParser()
    parser.add_argument("-u", "--url", dest="url", help="Specify the target URL to scan")
    parser.add_argument("-t", "--thread", dest="count", default=5, help="Specify the number of threads to use")
    args = parser.parse_args()
    if args.url:
        StartThread(args.url, args.count)
    else:
        parser.print_help()
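With a wordlist in place, a run follows the usage comment in the script; any 200 response is printed and also appended to result.html as a clickable link. A hypothetical session (the hits shown are illustrative only):

    python main.py -u http://www.lyshark.com -t 10
    [+] http://www.lyshark.com/admin/
    [+] http://www.lyshark.com/robots.txt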
Web application directory scanner: the premise here is that the target web server was built with an open-source CMS and that you have also downloaded a copy of the same open-source code yourself; the script walks that local source tree and requests each file path against the live server (skipping static resources) to see which files are actually exposed. Frankly, this feels of limited practical value.
import Queue
import threading
import os
import urllib2

threads = 10
target = "http://www.lyshark.com/"
directory = "/lysh"
filters = [".jpg", ".gif", ".png", ".css"]

os.chdir(directory)
web_paths = Queue.Queue()

# Walk the local copy of the CMS source tree and queue every path
# that is not a static resource.
for r, d, f in os.walk("."):
    for files in f:
        remote_path = "%s/%s" % (r, files)
        if remote_path.startswith("."):
            remote_path = remote_path[1:]
        if os.path.splitext(files)[1] not in filters:
            web_paths.put(remote_path)

def test_remote():
    # Request every queued path against the live server and report hits.
    while not web_paths.empty():
        path = web_paths.get()
        url = "%s%s" % (target.rstrip("/"), path)
        request = urllib2.Request(url)
        try:
            response = urllib2.urlopen(request)
            print("[%d] => %s" % (response.code, path))
            response.close()
        # URLError also covers HTTPError, so 404s and connection
        # failures alike are skipped instead of killing the thread.
        except urllib2.URLError:
            pass

for i in range(threads):
    t = threading.Thread(target=test_remote)
    t.start()
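The script above is Python 2 only (the Queue and urllib2 modules were renamed and reorganized in Python 3). A minimal Python 3 sketch of the same walk-and-probe idea, swapping urllib2 for the requests library and using placeholder target/directory values:

import os
import queue
import threading
import requests

threads = 10
target = "http://www.example.com"     # placeholder target, no trailing slash
directory = "/path/to/local/cms"      # placeholder: local copy of the CMS source
filters = {".jpg", ".gif", ".png", ".css"}

os.chdir(directory)
web_paths = queue.Queue()

# Queue every non-static file path found in the local source tree.
for root, dirs, files in os.walk("."):
    for name in files:
        remote_path = ("%s/%s" % (root, name)).lstrip(".")
        if os.path.splitext(name)[1] not in filters:
            web_paths.put(remote_path)

def test_remote():
    while not web_paths.empty():
        path = web_paths.get()
        try:
            response = requests.get(target + path, timeout=5)
            if response.status_code == 200:
                print("[%d] => %s" % (response.status_code, path))
        except requests.RequestException:
            pass

for _ in range(threads):
    threading.Thread(target=test_remote).start()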
Brute-forcing directory and file locations: this version takes a generic wordlist (here the SVNDigger list), tries each entry both as a directory and with a set of common file extensions, and can resume from a given word if a scan was interrupted.
import urllib2
import threading
import Queue
import urllib

threads = 50
target_url = "http://testphp.vulnweb.com"
wordlist_file = "/tmp/all.txt"  # from SVNDigger
resume = None
user_agent = "Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0"

def build_wordlist(wordlist_file):
    # Read in the wordlist file.
    fd = open(wordlist_file, "rb")
    raw_words = fd.readlines()
    fd.close()

    found_resume = False
    words = Queue.Queue()
    for word in raw_words:
        word = word.rstrip()
        if resume is not None:
            # Skip words until the resume marker is reached.
            if found_resume:
                words.put(word)
            else:
                if word == resume:
                    found_resume = True
                    print "Resuming wordlist from: %s" % resume
        else:
            words.put(word)
    return words

def dir_bruter(word_queue, extensions=None):
    while not word_queue.empty():
        attempt = word_queue.get()
        attempt_list = []

        # If there is no file extension, treat it as a directory to brute-force.
        if "." not in attempt:
            attempt_list.append("/%s/" % attempt)
        else:
            attempt_list.append("/%s" % attempt)

        # If we want to brute-force extensions as well.
        if extensions:
            for extension in extensions:
                attempt_list.append("/%s%s" % (attempt, extension))

        # Iterate over the list of attempts.
        for brute in attempt_list:
            url = "%s%s" % (target_url, urllib.quote(brute))
            try:
                headers = {}
                headers["User-Agent"] = user_agent
                r = urllib2.Request(url, headers=headers)
                response = urllib2.urlopen(r)
                if len(response.read()):
                    print "[%d] => %s" % (response.code, url)
            except urllib2.URLError, e:
                # Report anything other than a plain 404.
                if hasattr(e, 'code') and e.code != 404:
                    print "!!! %d => %s" % (e.code, url)

word_queue = build_wordlist(wordlist_file)
extensions = [".php", ".bak", ".orig", ".inc"]
for i in range(threads):
    t = threading.Thread(target=dir_bruter, args=(word_queue, extensions,))
    t.start()
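One detail worth calling out: the resume variable lets an interrupted scan pick up mid-wordlist instead of starting over. Setting it to a word from the list (the value below is a hypothetical example) makes build_wordlist discard everything before that entry:

    resume = "admin"  # hypothetical: skip all wordlist entries before "admin"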