fix: f-string usages #1

Merged · 1 commit · Jul 13, 2024
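Every hunk below makes the same change: the original f-strings use single quotes as the outer delimiter and also use single quotes inside their replacement fields (dictionary subscripts, str.replace arguments), which is a syntax error on Python versions before 3.12 (PEP 701 lifted that restriction in 3.12). The fix switches the outer delimiter to double quotes. A minimal sketch of the pattern with made-up values, not code taken from this repository:

import datetime

kwargs = {'status': 200, 'url': 'https://example.com'}  # illustrative values only

# Before the fix, the f-strings reused the outer quote character inside their
# replacement fields, which is a SyntaxError on Python < 3.12:
#   line = f'[{kwargs['status']}] url={kwargs['url']}'

# After the fix, the outer delimiter is a double quote, so the single quotes
# inside the braces no longer terminate the string:
line = f"[{kwargs['status']}] url={kwargs['url']}"

# The same rule covers the timestamp expressions in the log file names:
stamp = str(datetime.datetime.now()).split('.')[0].replace(' ', '_').replace(':', '-')
print(line, stamp)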
12 changes: 6 additions & 6 deletions core/loggers.py
@@ -7,19 +7,19 @@
# this logs the output with elements
def element_logger(elements_logs_list:list):
direc = os.getcwd()
with open(f'{direc}\\logs\\element-log__{str(datetime.datetime.now()).split('.')[0].replace(" ", '_').replace(':', '-')}__{random.randint(0, 1000)}.txt', 'a') as logs:
#logs.write(f'[{kwargs['request_type']}] [{kwargs['status']}] [{kwargs['time']}] url={kwargs['url']} elements={kwargs['element']} parameters={kwargs['parameters']}')
with open(f"{direc}\\logs\\element-log__{str(datetime.datetime.now()).split('.')[0].replace(' ', '_').replace(':', '-')}__{random.randint(0, 1000)}.txt", 'a') as logs:
#logs.write(f"[{kwargs['request_type']}] [{kwargs['status']}] [{kwargs['time']}] url={kwargs['url']} elements={kwargs['element']} parameters={kwargs['parameters']}"")
for elem_log in elements_logs_list:
logs.write(elem_log)

# this logs the output with the entire webpage
def webpage_logger(**kwargs):
direc = os.getcwd()
with open(f'{direc}\\logs\\webpage-log__{str(datetime.datetime.now()).split('.')[0].replace(" ", '_').replace(':', '-')}__{random.randint(0, 1000)}.txt', 'a') as logs:
logs.write(f'[{kwargs['request_type']}] [{kwargs['status']}] [{kwargs['time']}] url={kwargs['url']} parameters={kwargs['parameters']}')
with open(f"{direc}\\logs\\webpage-log__{str(datetime.datetime.now()).split('.')[0].replace(' ', '_').replace(':', '-')}__{random.randint(0, 1000)}.txt", 'a') as logs:
logs.write(f"[{kwargs['request_type']}] [{kwargs['status']}] [{kwargs['time']}] url={kwargs['url']} parameters={kwargs['parameters']}")

# this logs errors
def error_logger(**kwargs):
direc = os.getcwd()
with open(f'{direc}\\logs\\error-log__{str(datetime.datetime.now()).split('.')[0].replace(" ", '_').replace(':', '-')}__{random.randint(0, 1000)}.txt', 'a') as logs:
logs.write(f'[{kwargs['request_type']}] [{kwargs['status']}] [{kwargs['time']}] url={kwargs['url']} error={kwargs['error']}')
with open(f"{direc}\\logs\\error-log__{str(datetime.datetime.now()).split('.')[0].replace(' ', '_').replace(':', '-')}__{random.randint(0, 1000)}.txt", 'a') as logs:
logs.write(f"[{kwargs['request_type']}] [{kwargs['status']}] [{kwargs['time']}] url={kwargs['url']} error={kwargs['error']}")
18 changes: 9 additions & 9 deletions core/requestExecutor.py
@@ -46,7 +46,7 @@ def request_executor(url, params_list:dict, elems_list, req_type):

# check if there are any specific elements to be scraped, else just upload the entire webpage to a file
if elems_list == []:
save_path = f'{curr_dir}\\data\\scraped-data\\{str(datetime.datetime.now()).split('.')[0].replace(" ", '_').replace(':', '-')}--{str(url).replace('/', '=').replace('.', '-').replace(':', '').replace('?', 'SEARCH_QUERY')}-web_scraped.txt'
save_path = f"{curr_dir}\\data\\scraped-data\\{str(datetime.datetime.now()).split('.')[0].replace(' ', '_').replace(':', '-')}--{str(url).replace('/', '=').replace('.', '-').replace(':', '').replace('?', 'SEARCH_QUERY')}-web_scraped.txt"

# open the save file and assign it a unique name
with open(save_path, 'a', errors='ignore') as f_w:
@@ -61,7 +61,7 @@ def request_executor(url, params_list:dict, elems_list, req_type):

else:
elems = elems_list['elements']
save_path = f'{curr_dir}\\data\\scraped-data\\{str(datetime.datetime.now()).split('.')[0].replace(" ", '_').replace(':', '-')}--{str(url).replace('/', '=').replace('.', '-').replace(':', '').replace('?', 'SEARCH_QUERY')}-elements_scraped.txt'
save_path = f"{curr_dir}\\data\\scraped-data\\{str(datetime.datetime.now()).split('.')[0].replace(' ', '_').replace(':', '-')}--{str(url).replace('/', '=').replace('.', '-').replace(':', '').replace('?', 'SEARCH_QUERY')}-elements_scraped.txt"

# open the save file and assign it a unique name
with open(save_path, 'a', errors='ignore') as f_a:
@@ -90,7 +90,7 @@ def request_executor(url, params_list:dict, elems_list, req_type):
error_logger(url=url, time=f"Start Time: {start_time} End Time: {str(datetime.datetime.now())}", status='NOT FOUND', error=f'No links found in {url}', request_type=req_type)
else:
for i in links_scraped:
f_a.write(f'{str(i.get('href'))}\n')
f_a.write(f"{str(i.get('href'))}\n")
elems_logging_list.append(f"[{req_type}] [{code}] [Start Time: {start_time} End Time: {str(datetime.datetime.now())}] url={url} link={i.get('href')} parameters={params_list}\n")
# if the logs of elements isnt empty, log an element log. else just do nothing
if elems_logging_list != []:
@@ -124,7 +124,7 @@ def request_executor(url, params_list:dict, elems_list, req_type):
# check if there are any specific elements to be scraped, else just upload the entire webpage to a file
if elems_list == []:

save_path = f'{curr_dir}\\data\\scraped-data\\{str(datetime.datetime.now()).split('.')[0].replace(" ", '_').replace(':', '-')}--{str(url).replace('/', '=').replace('.', '-').replace(':', '').replace('?', 'SEARCH_QUERY')}-web_scraped.txt'
save_path = f"{curr_dir}\\data\\scraped-data\\{str(datetime.datetime.now()).split('.')[0].replace(' ', '_').replace(':', '-')}--{str(url).replace('/', '=').replace('.', '-').replace(':', '').replace('?', 'SEARCH_QUERY')}-web_scraped.txt"

with open(save_path, 'a', errors='ignore') as f_w:
f_w.write(scraped_page)
@@ -139,7 +139,7 @@ def request_executor(url, params_list:dict, elems_list, req_type):

else:
elems = elems_list['elements']
save_path = f'{curr_dir}\\data\\scraped-data\\{str(datetime.datetime.now()).split('.')[0].replace(" ", '_').replace(':', '-')}--{str(url).replace('/', '=').replace('.', '-').replace(':', '').replace('?', 'SEARCH_QUERY')}-elements_scraped.txt'
save_path = f"{curr_dir}\\data\\scraped-data\\{str(datetime.datetime.now()).split('.')[0].replace(' ', '_').replace(':', '-')}--{str(url).replace('/', '=').replace('.', '-').replace(':', '').replace('?', 'SEARCH_QUERY')}-elements_scraped.txt"

# open the save file and assign it a unique name
with open(save_path, 'a', errors='ignore') as f_a:
@@ -168,7 +168,7 @@ def request_executor(url, params_list:dict, elems_list, req_type):
error_logger(url=url, time=f"Start Time: {start_time} End Time: {str(datetime.datetime.now())}", status='NOT FOUND', error=f'No links found in {url}', request_type=req_type)
else:
for i in links_scraped:
f_a.write(f'{str(i.get('href'))}\n')
f_a.write(f"{str(i.get('href'))}\n")
elems_logging_list.append(f"[{req_type}] [{code}] [Start Time: {start_time} End Time: {str(datetime.datetime.now())}] url={url} link={i.get('href')} parameters={params_list}\n")


@@ -207,7 +207,7 @@ def request_executor(url, params_list:dict, elems_list, req_type):

# check if there are any elements to be scraped
if elems_list == []:
save_path = f'{curr_dir}\\data\\scraped-data\\{str(datetime.datetime.now()).split('.')[0].replace(" ", '_').replace(':', '-')}--{str(url).replace('/', '=').replace('.', '-').replace(':', '').replace('?', 'SEARCH_QUERY')}-web_scraped.txt'
save_path = f"{curr_dir}\\data\\scraped-data\\{str(datetime.datetime.now()).split('.')[0].replace(' ', '_').replace(':', '-')}--{str(url).replace('/', '=').replace('.', '-').replace(':', '').replace('?', 'SEARCH_QUERY')}-web_scraped.txt"

with open(save_path, 'a', errors='ignore') as f_w:
f_w.write(scraped_page)
@@ -223,7 +223,7 @@ def request_executor(url, params_list:dict, elems_list, req_type):

else:
elems = elems_list['elements']
save_path = f'{curr_dir}\\data\\scraped-data\\{str(datetime.datetime.now()).split('.')[0].replace(" ", '_').replace(':', '-')}--{str(url).replace('/', '=').replace('.', '-').replace(':', '').replace('?', 'SEARCH_QUERY')}-elements_scraped.txt'
save_path = f"{curr_dir}\\data\\scraped-data\\{str(datetime.datetime.now()).split('.')[0].replace(' ', '_').replace(':', '-')}--{str(url).replace('/', '=').replace('.', '-').replace(':', '').replace('?', 'SEARCH_QUERY')}-elements_scraped.txt"

# open the save file and assign it a unique name
with open(save_path, 'a', errors='ignore') as f_a:
@@ -252,7 +252,7 @@ def request_executor(url, params_list:dict, elems_list, req_type):
error_logger(url=url, time=f"Start Time: {start_time} End Time: {str(datetime.datetime.now())}", status='NOT FOUND', error=f'No links found in {url}', request_type=req_type)
else:
for i in links_scraped:
f_a.write(f'{str(i.get('href'))}\n')
f_a.write(f"{str(i.get('href'))}\n")
elems_logging_list.append(f"[{req_type}] [{code}] [Start Time: {start_time} End Time: {str(datetime.datetime.now())}] url={url} link={i.get('href')} parameters={params_list}\n")
# if the logs of elements isnt empty, log an element log. else just do nothing
if elems_logging_list != []:
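core/requestExecutor.py repeats the same save_path expression in every branch of request_executor, which is why the identical quote fix shows up six times. The expression does two jobs: it makes the timestamp file-system safe and it sanitises the URL. A sketch that pulls that logic into a hypothetical helper (scrape_save_path is not a name from this PR) purely to make the intent easier to read:

import datetime
import os

def scrape_save_path(curr_dir, url, suffix):
    # Hypothetical helper, not part of this PR: reproduces the sanitising chain
    # used in the fixed save_path f-strings.
    stamp = str(datetime.datetime.now()).split('.')[0].replace(' ', '_').replace(':', '-')
    safe_url = str(url).replace('/', '=').replace('.', '-').replace(':', '').replace('?', 'SEARCH_QUERY')
    return f"{curr_dir}\\data\\scraped-data\\{stamp}--{safe_url}-{suffix}.txt"

print(scrape_save_path(os.getcwd(), 'https://example.com/page?q=1', 'web_scraped'))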
4 changes: 2 additions & 2 deletions gui.py
@@ -275,8 +275,8 @@ def add_to_list(which_list):
s_a_p_list.addItem(f"{site_list[-1]['url']} {site_list[-1]['request type']}")

# append to other required lists
w_r_p_site.addItem(f'{site_list[-1]['url']}')
e_t_s_for_site.addItem(f'{site_list[-1]['url']}')
w_r_p_site.addItem(f"{site_list[-1]['url']}")
e_t_s_for_site.addItem(f"{site_list[-1]['url']}")

elif which_list == 'element':
# get the element values
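The gui.py change follows the same rule. Since each of these f-strings wraps a single dictionary lookup, an alternative fix would have been to drop the f-string and pass the value directly, e.g. w_r_p_site.addItem(site_list[-1]['url']), assuming the stored value is already a string; that is only an observation, and the PR keeps the f-strings and fixes their quoting.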