# XSPID3R.py
# By MOHAMMED ADEL
# Twitter : @moh_security
import requests # pip install requests
import re # pip install re
import argparse # pip install argparse
from bs4 import BeautifulSoup, SoupStrainer # pip install BeautifulSoup | pip install SoupStrainer
def prRed(prt):
    # Wrap the text in ANSI red (code 91) and print it, resetting color after.
    colored = "\033[91m {}\033[00m".format(prt)
    print(colored)
def prGreen(prt):
    # Wrap the text in ANSI green (code 92) and print it, resetting color after.
    colored = "\033[92m {}\033[00m".format(prt)
    print(colored)
def prPurple(prt):
    # Wrap the text in ANSI purple (code 95) and print it, resetting color after.
    colored = "\033[95m {}\033[00m".format(prt)
    print(colored)
def prCyan(prt):
    # Wrap the text in ANSI cyan (code 96) and print it, resetting color after.
    colored = "\033[96m {}\033[00m".format(prt)
    print(colored)
prRed (
'''
****************************************
:
:
,, : ,,
:: : ::
,, :: : :: ,,
:: :: : :: ::
'::. '::. : .::' .::'
'::. '::. _/~\_ .::' .::'
'::. :::/ \::: .::'
':::::( ):::::'
\ ___ /
.:::::/` `\:::::.
.::' .:\o o/:. '::.
.::' .:: :": ::. '::.
.::' ::' ' ' ':: '::.
:: :: :: ::
^^ :: :: ^^
:: ::
^^ ^^
'''
)
# Command-line interface: three optional flags, one per tool mode.
parser = argparse.ArgumentParser(description="[+] XSPID3R is a tool that uses DNSDUMPSTER To Scan specific TARGETS.[+]")
_ARG_SPECS = (
    ("-s", "--search", "-s example.com OR --search example.com"),
    ("-b", "--backup", "-b example.com OR --backup example.com"),
    ("-a", "--author", "-a show OR --author show"),
)
for _short, _long, _help in _ARG_SPECS:
    parser.add_argument(_short, _long, type=str, help=_help)
args = parser.parse_args()
# One shared HTTP session so cookies (e.g. the CSRF token) persist between requests.
client = requests.session()
GET_URL_VCOMM = args.search
GET_BACKUO_URL = args.backup
# Prompt shim: raw_input() exists only on Python 2; the rest of this file
# already uses function-style print(), so keep both interpreters working.
try:
    _input = raw_input  # noqa: F821 -- Python 2
except NameError:
    _input = input      # Python 3

if args.search:
    # --- Scan mode: submit the target to DNSDumpster and harvest links. ---
    URL_TARGET = GET_URL_VCOMM
    URL = 'https://dnsdumpster.com/'
    # Initial GET obtains the CSRF cookie that DNSDumpster requires on the POST.
    client.get(URL)
    csrftoken = client.cookies['csrftoken']
    payload = {
        'targetip': URL_TARGET,
        'csrfmiddlewaretoken': csrftoken
    }
    # The Referer header must match the site or the POST is rejected.
    r = client.post(URL, data=payload, headers=dict(Referer=URL))
    # Persist the raw HTML response as an on-disk artifact.
    with open('DNS_RESP.txt', 'w') as DNS_FILE:
        DNS_FILE.write(r.text)
    # Extract every http(s) link from the saved response.
    with open('DNS_RESP.txt', 'r') as scan1, open('DNS_LINKS.txt', 'w') as DNS_LINKS:
        soup = BeautifulSoup(scan1, "lxml")
        for link1 in soup.findAll('a', attrs={'href': re.compile('((http|https)s?://.*?)')}):
            DNS_LINKS.write(link1.get('href'))
            DNS_LINKS.write("\n")
    # Strip any leading "...?q=" redirect wrapper from each harvested link.
    with open('DNS_LINKS.txt', 'r') as DNS_FILTER_LINKS, open('DNS_FFR.txt', 'w') as DNS_FFR_LINKS:
        for line in DNS_FILTER_LINKS:
            DNS_FFR_LINKS.write(line.rpartition('?q=')[2])
    # Text mode here: the original 'r+b' produced bytes, which never match the
    # str comparisons below on Python 3.
    with open('DNS_FFR.txt', 'r') as DNS_PRINT_OUT:
        Final_Result = DNS_PRINT_OUT.readlines()
    prCyan("[+] TARGET : " + URL_TARGET)
    prCyan("[+] CSRF TOKEN : " + csrftoken)
    # Guard: S_REPORT was previously unbound (NameError) when no xls report
    # link appeared in the results.
    S_REPORT = ""
    for line in Final_Result:
        if "https://dnsdumpster.com/static/xls/" in line:
            S_REPORT = line
    prCyan("[+] Scan Report : " + S_REPORT)
    Choice_TO_SRESULT = _input("[**] Show RESULT ? (y/n) #> ")
    # Append mode so repeated scans accumulate in FINAL_RESULT.txt.
    with open('FINAL_RESULT.txt', 'a') as PRINT_FINAL_URLSS:
        PRINT_FINAL_URLSS.write("[+] TARGET : " + URL_TARGET)
        PRINT_FINAL_URLSS.write("\n")
        PRINT_FINAL_URLSS.write("[+] CSRF TOKEN : " + csrftoken)
        PRINT_FINAL_URLSS.write("\n")
        PRINT_FINAL_URLSS.write("[+] Scan Report : " + S_REPORT)
        PRINT_FINAL_URLSS.write("\n")
        if Choice_TO_SRESULT in ("y", "Y"):
            # Show matching results on screen AND save them.
            for line in Final_Result:
                if URL_TARGET in line:
                    print("\n")
                    prGreen("[+] " + line)
                    PRINT_FINAL_URLSS.write(line)
        elif Choice_TO_SRESULT in ("n", "N"):
            # Save silently, then tell the user where the data went.
            print("\n")
            for line in Final_Result:
                if URL_TARGET in line:
                    PRINT_FINAL_URLSS.write(line)
            prGreen("[INFO] DATA SAVED IN FILE [FINAL_RESULT.txt] \n")
            prGreen("[INFO] To Search Backup Data Use The Commands Below : \n")
            prGreen("[INFO] python XSPID3R.py --backup examples.com \n")
            print("\n")
elif args.backup:
    # --- Backup mode: grep previously saved results for the given domain. ---
    with open('FINAL_RESULT.txt', 'r') as Search_FILE:
        for line in Search_FILE:
            if GET_BACKUO_URL in line:
                prGreen(line)
elif args.author:
    # --- Author mode: print attribution. ---
    prPurple("[+] Author : MOHAMMED ADEL\n")
    prPurple("[+] Twitter : @moh_security\n")
    prPurple("[+] Github : github.com/inurlx\n")
exit()