-
Notifications
You must be signed in to change notification settings - Fork 0
/
xssSick_detailed_old.py
181 lines (153 loc) · 6.54 KB
/
xssSick_detailed_old.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
import os
import sys
import requests
from colorama import Fore, Style, init
from urllib.parse import urlparse, parse_qs, urlencode, urlunparse
# Usage guard: exactly one argument (the URL-list text file) is required.
if len(sys.argv) != 2:
    print("Usage: python3 script_name.py example.com.txt")
    sys.exit(1)
# Path to the text file of URLs, taken from the command line.
file = sys.argv[1]
# Base name without extension, e.g. "example.com" from "example.com.txt";
# used later to build the per-domain output file name.
domain, _ = os.path.splitext(os.path.basename(file))
# Initialize colorama (autoreset restores default colors after each print).
init(autoreset=True)
def read_match_words_from_file(match_file_path):
    """Load search words from *match_file_path*, one stripped word per line.

    Returns an empty list (after printing the error) when the file does
    not exist.
    """
    words = []
    try:
        with open(match_file_path, "r") as handle:
            for raw_line in handle:
                words.append(raw_line.strip())
    except FileNotFoundError as err:
        print(f"Error: {err}")
        return []
    return words
def modify_query_param(url, param_name, new_value):
    """Return *url* with query parameter *param_name* set to *new_value*.

    The parameter is added if absent; blank query values are preserved
    and every other URL component is left untouched.
    """
    parts = urlparse(url)
    params = parse_qs(parts.query, keep_blank_values=True)
    params[param_name] = [new_value]
    rebuilt_query = urlencode(params, doseq=True)
    return urlunparse(parts._replace(query=rebuilt_query))
def dom_possible(sinks, sources, response, output_file):
    """Log the first DOM-XSS sink/source pair found in *response*'s body.

    Scans the page text case-insensitively for the first sink that
    appears, then for the first source that also appears; a pair where
    both names contain "location" is skipped as a known false positive.
    On a match a single aligned line is written to *output_file* and the
    search stops (both loops exit).
    """
    # Lowercase the page once instead of on every containment test;
    # the original recomputed response.text.lower() per sink and per source.
    page = response.text.lower()
    for sink in sinks:
        if sink.lower() not in page:
            continue
        for source in sources:
            if source.lower() in page and not (
                "location" in sink and "location" in source
            ):
                output_file.write(f"{sink:22} {source:22} {response.url}\n")
                output_file.flush()  # make the hit visible immediately
                return
def _report_hits(response, search_words, output_file, origin_url=None):
    """Scan *response*'s body for each search word and report every hit.

    Prints a colored message per hit and appends a log line to
    *output_file*.  When *origin_url* is given, the hit is reported as
    coming from a redirected page and the log line records both the
    origin and the landing URL.
    """
    # Lowercase the page once, not once per search word.
    page = response.text.lower()
    for search_word in search_words:
        if search_word.lower() not in page:
            continue
        if origin_url is None:
            found_text = f"The word '{search_word}' was found."
            log_line = f"{search_word:22} {response.url}\n"
        else:
            found_text = f"The word '{search_word}' was found in the redirected page."
            log_line = f"{search_word:22} origin:{origin_url} landed:{response.url}\n"
        # Green message with the matched word highlighted in red.
        print(
            Fore.GREEN
            + found_text.replace(search_word, Fore.RED + search_word + Fore.GREEN)
            + Style.RESET_ALL
        )
        output_file.write(log_line)
        output_file.flush()  # ensure the content is written immediately


def search_words_in_webpage(url, search_words, user_agent, output_file, timeout=23):
    """Fetch *url* and log any search words present in its source code.

    Redirects are followed; if the final URL differs from *url*, the
    landing page is scanned a second time and logged with both the
    origin and landing URLs (matching the original two-pass behaviour).
    Network errors are printed and swallowed.
    """
    try:
        # Session carries the custom User-Agent for the request.
        with requests.Session() as session:
            session.headers = {"User-Agent": user_agent}
            response = session.get(url, timeout=timeout, allow_redirects=True)
            response.raise_for_status()
            # dom_possible(SINKS, SOURCES, response, output_file)  # optional DOM-XSS heuristic
            _report_hits(response, search_words, output_file)
            # A differing final URL means at least one redirect happened.
            if response.url != url:
                _report_hits(response, search_words, output_file, origin_url=url)
    except requests.exceptions.RequestException:
        print(f"Failed to fetch: {url}")
def search_words_in_multiple_urls(
    url_file_path, match_file_path, user_agent, output_file_path
):
    """Probe every query parameter of every URL and log reflected words.

    Reads URLs from *url_file_path* and search words from
    *match_file_path*.  Each query parameter is replaced in turn with a
    fixed probe payload and the resulting page is scanned by
    search_words_in_webpage(); results are appended to
    *output_file_path*.  URLs without query parameters are skipped.

    Raises nothing: a missing URL file is reported and swallowed.
    """
    # Probe payload is loop-invariant, so build it once up front.
    new_value = r"""asdf">/<"""
    try:
        with open(url_file_path, "r") as url_file:
            urls = [line.strip() for line in url_file]
        match_words = read_match_words_from_file(match_file_path)
        total_urls = len(urls)
        # Open in append mode so repeated runs accumulate results.
        with open(output_file_path, "a") as output_file:
            for urls_processed, url in enumerate(urls, start=1):
                print(f"\nProcessing URL {urls_processed} of {total_urls}")
                print(f"Current URL: {url}")
                # Only the parameter names are needed here.
                query_params = parse_qs(urlparse(url).query, keep_blank_values=True)
                for param_name in query_params:
                    modified_url = modify_query_param(url, param_name, new_value)
                    search_words_in_webpage(
                        modified_url, match_words, user_agent, output_file
                    )
        print("\nAll URLs processed. Found URLs saved to", output_file_path)
    except FileNotFoundError as e:
        print(f"Error: {e}")
# Define constants
# File of words to look for in each fetched page (one word per line).
MATCH_FILE_PATH = "match.txt"
# DOM-XSS taint sources — used only by dom_possible(), whose call sites
# are currently commented out in search_words_in_webpage().
SOURCES = [
    "document.URL",
    "document.documentURI",
    "document.baseURI",
    "location",
    "document.cookie",
    "document.referrer",
    "window.name",
    "history.pushState",
    "history.replaceState",
    "localStorage",
    "sessionStorage",
]
# DOM-XSS sinks — also consumed only by the disabled dom_possible() check.
SINKS = [
    "document.write",
    "window.location",
    "document.domain",
    "element.innerHTML",
    "element.setAttribute",
    "location",
    "element.outerHTML",
    "element.insertAdjacentHTML",
    "element.onevent",
    "eval",
]
# Desktop Firefox User-Agent sent with every request.
USER_AGENT = r"""Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/120.0"""
# Results are appended to "<domain>_found_urls.txt" next to the script.
OUTPUT_FILE_PATH = domain + "_found_urls.txt"
# Starts the program
search_words_in_multiple_urls(file, MATCH_FILE_PATH, USER_AGENT, OUTPUT_FILE_PATH)