-
Notifications
You must be signed in to change notification settings - Fork 6
/
openredacan.py
163 lines (130 loc) · 5.52 KB
/
openredacan.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
#!/usr/bin/env python3
import re
import random
import requests
import warnings
import argparse
from plugins.banner import *
from bs4 import BeautifulSoup
from core.bug import crlfScanFunc
from payload.payload import sourcesSinks
from core.gatherurls import getAllURLsFunc
from core.features import requesterFunc, multitest
# Suppress library warnings (e.g. urllib3 TLS warnings) so scan output stays clean
warnings.filterwarnings('ignore')
# Print the tool banner
bannerFunc()
# Command-line arguments
parser = argparse.ArgumentParser(usage="Help Menu")
parser.add_argument('-u', help='Domain Name.', dest="url")
parser.add_argument('-l', help='Multiple targets. (Ex: domains.txt)', dest='path')
parser.add_argument('-crlf', help='Scan CRLF Injection.', action='store_true', dest='crlf')
parser.add_argument('-p', help='Use payloads file.', dest="payload", default="text/payloads.txt")
parser.add_argument('--proxy', help='use proxy', action='store_true', dest='proxy')
parser.add_argument('--wayback', help='fetch URLs from waybackmachine', action="store_true", dest='waybacks')
args = parser.parse_args()
url = args.url
# A custom payload file only makes sense for the open-redirect scan,
# not for the CRLF or wayback modes.
if ((args.payload != "text/payloads.txt") and (args.crlf or args.waybacks)):
    print(f"{bold}{red} '-p' can't be used with '-crlf' or '--wayback'{reset}")
    quit()
if not (args.url or args.path):
    print(f"{orange}No arguments, Run -h for help{reset}")
# Payloads are only needed for the open-redirect scan modes.
if not args.crlf and not args.waybacks:
    try:
        # use a context manager so the file handle is closed instead of leaked
        with open(args.payload, encoding='utf-8') as payload_fh:
            file = payload_fh.read().splitlines()
    except FileNotFoundError:
        print(f"{bold}{red}Payload file not found{reset}")
        exit()
if args.path:
    try:
        with open(args.path, encoding='utf-8') as targets_fh:
            urls = targets_fh.read().splitlines()
    except FileNotFoundError:
        print(f"{bold}{red}Target file not found{reset}")
        quit()
def analyze(url):
    """Run every payload variant against *url*, stopping at the first hit.

    multitest() returns either a tuple — presumably (param_dicts, base_url),
    where payloads are sent as request parameters — or a flat iterable of
    fully-built URLs; both shapes are handled below. TODO confirm the tuple
    layout against core.features.multitest.
    """
    multiTestCall = multitest(url, file)
    print(f'{bold}{orange}[~] Payload Type :{reset}{blue} Infusing payloads\n{reset}')
    # isinstance() is the idiomatic type test (was: type(...) == tuple)
    if isinstance(multiTestCall, tuple):
        for params in multiTestCall[0]:
            # request() returns True on a finding or fatal network error:
            # either way, further payloads for this target are pointless.
            if request(multiTestCall[1], params):
                break
    else:
        for candidate in multiTestCall:
            if request(candidate):
                break
def request(URI, params=''):
    """Fetch *URI* (optionally with query *params*) and inspect the response.

    Returns True when the caller should stop iterating payloads for this
    target (network failure or a confirmed finding); returns None otherwise.
    """
    try:
        page = requesterFunc(URI, args.proxy, params)
    except requests.exceptions.Timeout:
        # BUG FIX: report the URI that actually timed out; the old code
        # printed the module-level `url`, which is wrong (or None) in -l mode.
        print(f"[Timeout] {URI}")
        return True
    except requests.exceptions.ConnectionError:
        print(f"{bold}{red}Connection Error{reset}")
        return True
    # page.request.url is the final prepared URL (after param encoding)
    if check(page, page.request.url):
        return True
def check(requestsObjectVar, finalURL):
    """Inspect a response for open-redirect indicators.

    Looks for payload reflections inside <script> and <meta> tags, for 3xx
    header redirects, and for known DOM source/sink identifiers. Returns
    True when further payloads for this target are pointless (finding
    confirmed, or the page merely refreshes itself); None otherwise.
    """
    # Regex alternation matching any loaded payload, escaped for safety.
    payload = "|".join([re.escape(i) for i in file])
    redirectCodes = list(range(300, 311))  # 3xx redirect family
    errorCodes = list(range(400, 411))     # 4xx client-error family
    soup = BeautifulSoup(requestsObjectVar.text, 'html.parser')
    google = re.search(payload, str(soup.find_all("script")), re.IGNORECASE)
    metas = str(soup.find_all('meta'))
    searchMetaTagVar = re.search(payload, metas, re.IGNORECASE)
    escapedSourcesSinks = [re.escape(SnS) for SnS in sourcesSinks]
    # dict.fromkeys de-duplicates while preserving first-seen order.
    sourcesMatch = list(dict.fromkeys(re.findall(
        "|".join(escapedSourcesSinks), str(soup))))
    if requestsObjectVar.status_code in redirectCodes:
        if searchMetaTagVar and "http-equiv=\"refresh\"" in metas:
            print(f"{bold}{green}Meta Tag Redirection{reset}")
            return True
        else:
            # BUG FIX: .get() avoids a KeyError on a 3xx without a Location header
            print(f"{bold}{green}[~] Header Based Redirection :{reset}{purple}{finalURL} {requestsObjectVar.headers.get('Location', '')}\n")
    elif requestsObjectVar.status_code == 200:
        if google:
            print(f"{bold}{green}[~] Javascript Based Redirection{reset}" )
            # BUG FIX: re.findall() returns a list (never None), so the old
            # `!= None` test always fired and printed an empty "Found:" line;
            # only report sources/sinks when matches actually exist. The
            # payload reflection alone still ends the scan for this target.
            if sourcesMatch:
                print(f"{bold}{green}Potentially Vulnerable Source/Sink(s) Found: %s" % (" ".join(sourcesMatch)))
            return True
        if searchMetaTagVar and "http-equiv=\"refresh\"" in str(requestsObjectVar.text):
            print(f"{bold}{green}Meta Tag Redirection{reset}")
            return True
        elif "http-equiv=\"refresh\"" in str(requestsObjectVar.text) and not searchMetaTagVar:
            print(f"{bold}{red}The page is only getting refreshed.{reset}")
            return True
    elif requestsObjectVar.status_code in errorCodes:
        print(f"{bold}{red} {finalURL} {orange} {requestsObjectVar.status_code}{reset}")
    else:
        print(f"{bold}{red} Found nothing :: {finalURL}")
try:
    # Single-target mode (-u)
    if args.url:
        if args.crlf and not args.waybacks:
            # CRLF-injection scan only
            crlfScanFunc(url, args.proxy)
        elif args.waybacks and not args.crlf:
            # Harvest historical URLs from the Wayback Machine
            print(f"{bold}{orange}Getting URLs from waybackmachine{reset}")
            getAllURLsFunc(url, "wayback_data.txt")
        # NOTE(review): `not (A and B)` is True when at most one flag is set,
        # but the two elifs above already consumed the single-flag cases, so
        # this branch runs only when NEITHER flag is set (both set -> no-op).
        # Confirm whether `not (args.crlf or args.waybacks)` was intended.
        elif not (args.crlf and args.waybacks):
            analyze(url)
    # Multi-target mode (-l): iterate the lines loaded from the targets file
    elif args.path:
        if args.crlf and not args.waybacks:
            for url in urls:
                print(f"{bold}{orange}Target: {url}")
                crlfScanFunc(url, args.proxy)
                print("\n")
        elif args.waybacks and not args.crlf:
            print(f"{bold}{orange}Getting URLs from waybackmachine")
            for url in urls:
                print(f"{bold}{orange}URL: {url}")
                # Random suffix keeps per-target output files from clobbering
                # each other (collisions still possible within 0-1000).
                getAllURLsFunc(url, f"wayback_{random.randint(0, 1000)}.txt")
                print("\n")
        elif not (args.crlf and args.waybacks):
            # Default open-redirect scan per target (see NOTE above on the condition)
            for url in urls:
                print(f"{bold}{orange}Target: {blue}{url}{reset}")
                analyze(url)
                print("\n")
except KeyboardInterrupt:
    # Clean exit on Ctrl-C instead of a traceback
    print("\nKeyboard Interrupt Detected. Exiting...")
    exit()