From 111b820132aa09d0e2271531b7ca912149ed9070 Mon Sep 17 00:00:00 2001
From: Kartikey Gupta
Date: Thu, 2 May 2024 20:56:45 +0530
Subject: [PATCH] fix: pylint errors

---
 .../agent-assist/backend/src/apis/chatbot.py  |  3 +--
 .../backend/src/apis/customermanagement.py    | 24 ++++-------------
 .../agent-assist/backend/src/backend.py       |  3 +--
 .../preprocessing/table/process_function.py   |  6 ++---
 .../preprocessing/table/table_to_text.py      | 20 +++++++-------
 .../preprocessing/table/text_bison.py         | 22 ++++++++--------
 .../agents/search_agent/utils/text_bison.py   | 26 ++++++++++++-------
 .../src/chatbot_dir/dataframe_answer.py       |  8 +++---
 .../src/chatbot_dir/orchestration_engine.py   |  4 +--
 .../agent-assist/backend/src/utils/cal.py     | 10 +++----
 .../backend/src/utils/gemini_text.py          | 22 ++++++++--------
 .../backend/src/utils/get_users.py            | 12 ++++-----
 .../agent-assist/backend/src/utils/mail.py    | 16 +++---------
 .../backend/src/utils/mail_trial.py           | 10 ++++---
 .../backend/src/utils/text_bison.py           | 18 ++++++-------
 15 files changed, 96 insertions(+), 108 deletions(-)

diff --git a/gemini/sample-apps/agent-assist/backend/src/apis/chatbot.py b/gemini/sample-apps/agent-assist/backend/src/apis/chatbot.py
index f141a92062..6b41b1ed34 100644
--- a/gemini/sample-apps/agent-assist/backend/src/apis/chatbot.py
+++ b/gemini/sample-apps/agent-assist/backend/src/apis/chatbot.py
@@ -37,8 +37,7 @@ def chatbot_entry(data: dict = {}) -> dict:
     with open("data/static/oe_examples/logs.json", "w") as f:
         json.dump(logs, f)
 
-    result = run_orchestrator(query, chat_history_string)
-    return result
+    run_orchestrator(query, chat_history_string)
 
 
 def process_history(chat_history):
diff --git a/gemini/sample-apps/agent-assist/backend/src/apis/customermanagement.py b/gemini/sample-apps/agent-assist/backend/src/apis/customermanagement.py
index ac54af51bc..0f4484602a 100644
--- a/gemini/sample-apps/agent-assist/backend/src/apis/customermanagement.py
+++ b/gemini/sample-apps/agent-assist/backend/src/apis/customermanagement.py
@@ -39,9 +39,7 @@ def get_customer_management_data():
         jsonify(
             {
                 "total_active_customers": total_active_customers,
-                "average_satisfaction_score": float(
-                    "{:.3}".format(average_satisfaction_score)
-                ),
+                "average_satisfaction_score": float(f"{average_satisfaction_score:.3}"),
                 "total_lapsed_customers": total_lapsed_customers,
                 "chart_data": chart_data,
             }
@@ -76,11 +74,7 @@ def get_metrics_data(data: list, start_date: str, end_date: str):
             if policy_start_date is None:
                 continue
 
-            if (
-                policy["current_policy"]
-                and policy_start_date >= start_date
-                and policy_start_date <= end_date
-            ):
+            if policy["current_policy"] and start_date <= policy_start_date <= end_date:
                 total_active_customers += 1
 
     if total_ratings != 0:
@@ -106,11 +100,7 @@ def get_lapsed_customers(data: list, start_date: str, end_date: str):
             policy_end_date = policy["policy_end_date"]
             if policy_end_date is None:
                 continue
-            if (
-                policy["current_policy"] is None
-                and policy_end_date >= start_date
-                and policy_end_date <= end_date
-            ):
+            if policy["current_policy"] is None and start_date <= policy_end_date <= end_date:
                 total_lapsed_customers += 1
 
     return total_lapsed_customers
@@ -131,7 +121,7 @@ def get_chart_data(data, start_date, end_date):
     start_date = datetime.strptime(start_date, "%Y-%m-%d")
     end_date = datetime.strptime(end_date, "%Y-%m-%d")
     month_list = [
-        datetime.strptime("%2.2d-%2.2d" % (year, month), "%Y-%m").strftime("%b-%y")
+        datetime.strptime(f"{year:02}-{month:02}", "%Y-%m").strftime("%b-%y")
         for year in range(start_date.year, end_date.year + 1)
         for month in range(
             start_date.month if year == start_date.year else 1,
@@ -156,11 +146,7 @@ def get_chart_data(data, start_date, end_date):
                     "satisfaction_score": policy["satisfaction_score"],
                     "count": 1,
                 }
-            if (
-                policy["current_policy"]
-                and policy_start_date >= start_date
-                and policy_start_date <= end_date
-            ):
+            if policy["current_policy"] and start_date <= policy_start_date <= end_date:
                 if month in month_data:
                     month_data[month]["active_customers"] += 1
                 else:
diff --git a/gemini/sample-apps/agent-assist/backend/src/backend.py b/gemini/sample-apps/agent-assist/backend/src/backend.py
index 00f6aa78bb..5772fe5d6d 100644
--- a/gemini/sample-apps/agent-assist/backend/src/backend.py
+++ b/gemini/sample-apps/agent-assist/backend/src/backend.py
@@ -163,8 +163,7 @@ def handle_chatbot(data):
     """Handles the chatbot."""
     print(data)
     emit("chat", ["Generating..."])
-    chatbot_response = chatbot.chatbot_entry(data)
-    print(chatbot_response)
+    chatbot.chatbot_entry(data)
     emit("chat", ["Done"])
 
 
diff --git a/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/agents/search_agent/preprocessing/table/process_function.py b/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/agents/search_agent/preprocessing/table/process_function.py
index a80a7a2c1e..19f185a649 100644
--- a/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/agents/search_agent/preprocessing/table/process_function.py
+++ b/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/agents/search_agent/preprocessing/table/process_function.py
@@ -8,7 +8,7 @@
 from src.chatbot_dir.agents.search_agent.preprocessing.table.text_bison import TextBison
 
 
-def processTable(table_df_string: str) -> str:
+def process_table(table_df_string: str) -> str:
     """Processes a table in dataframe string format using TextBison.
 
     Args:
@@ -18,6 +18,6 @@ def processTable(table_df_string: str) -> str:
         str: The processed table in dataframe string format.
     """
     tb = TextBison()
-    PROMPT = PROMPT_FOR_TABLE.format(table_df_string)
-    df_string = tb.generate_response(PROMPT)
+    prompt = PROMPT_FOR_TABLE.format(table_df_string)
+    df_string = tb.generate_response(prompt)
     return df_string
diff --git a/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/agents/search_agent/preprocessing/table/table_to_text.py b/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/agents/search_agent/preprocessing/table/table_to_text.py
index 4133408357..8fde40ac05 100644
--- a/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/agents/search_agent/preprocessing/table/table_to_text.py
+++ b/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/agents/search_agent/preprocessing/table/table_to_text.py
@@ -7,23 +7,23 @@
 from img2table.document import PDF
 from img2table.ocr import TesseractOCR
 from src.chatbot_dir.agents.search_agent.preprocessing.table.process_function import (
-    processTable,
+    process_table,
 )
 
 
 # Function to process the PDF tables and save the extracted text to files.
-def process_pdf_tables(DOCUMENT_PATH: str, POLICY_NAME: str) -> None:
+def process_pdf_tables(document_path: str, policy_name: str) -> None:
     """Processes the PDF tables and saves the extracted text to files.
 
     Args:
-        DOCUMENT_PATH (str): The path to the PDF document.
-        POLICY_NAME (str): The name of the policy to which the PDF document belongs.
+        document_path (str): The path to the PDF document.
+        policy_name (str): The name of the policy to which the PDF document belongs.
""" - OUTPUT_PATH = f"data/static/table_text/{POLICY_NAME}/" + output_path = f"data/static/table_text/{policy_name}/" - pdf = PDF(src=DOCUMENT_PATH) + pdf = PDF(src=document_path) ocr = TesseractOCR(lang="eng") @@ -31,17 +31,17 @@ def process_pdf_tables(DOCUMENT_PATH: str, POLICY_NAME: str) -> None: for idx, pdf_table in pdf_tables.items(): try: - os.makedirs(OUTPUT_PATH + str(idx)) + os.makedirs(output_path + str(idx)) except OSError: pass if not pdf_table: continue for jdx, table in enumerate(pdf_table): table_df_string = table.df.to_string() - table_string = processTable(table_df_string) + table_string = process_table(table_df_string) print(table_string) - with open(OUTPUT_PATH + f"{idx}/table_df_{jdx}.txt", "w") as f: + with open(output_path + f"{idx}/table_df_{jdx}.txt", "w") as f: f.write(table_df_string) - with open(OUTPUT_PATH + f"{idx}/table_string_{jdx}.txt", "w") as f: + with open(output_path + f"{idx}/table_string_{jdx}.txt", "w") as f: f.write(table_string) diff --git a/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/agents/search_agent/preprocessing/table/text_bison.py b/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/agents/search_agent/preprocessing/table/text_bison.py index b360514bd6..daee3026f2 100644 --- a/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/agents/search_agent/preprocessing/table/text_bison.py +++ b/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/agents/search_agent/preprocessing/table/text_bison.py @@ -20,8 +20,8 @@ class TextBison: A class to interact with the Text Bison model from Vertex AI. Args: - PROJECT_ID (str): The project ID of the Vertex AI project. - LOCATION (str): The location of the Vertex AI project. + project_id (str): The project ID of the Vertex AI project. + location (str): The location of the Vertex AI project. max_output_tokens (int): The maximum number of tokens to generate. temperature (float): The temperature controls the randomness of the generated text. top_p (float): Top-p nucleus sampling. @@ -30,8 +30,8 @@ class TextBison: def __init__( self, - PROJECT_ID=config["PROJECT_ID"], - LOCATION=config["LOCATION"], + project_id=config["PROJECT_ID"], + location=config["LOCATION"], max_output_tokens: int = 8192, temperature: float = 0.1, top_p: float = 0.8, @@ -41,15 +41,15 @@ def __init__( Initialize the TextBison class. Args: - PROJECT_ID (str): The project ID of the Vertex AI project. - LOCATION (str): The location of the Vertex AI project. + project_id (str): The project ID of the Vertex AI project. + location (str): The location of the Vertex AI project. max_output_tokens (int): The maximum number of tokens to generate. temperature (float): The temperature controls the randomness of the generated text. top_p (float): Top-p nucleus sampling. top_k (int): Top-k nucleus sampling. """ - self.PROJECT_ID = PROJECT_ID - self.LOCATION = LOCATION + self.project_id = project_id + self.location = location self.parameters = { "max_output_tokens": max_output_tokens, "temperature": temperature, @@ -57,11 +57,11 @@ def __init__( "top_k": top_k, } - vertexai.init(project=self.PROJECT_ID, location=self.LOCATION) + vertexai.init(project=self.project_id, location=self.location) self.model = TextGenerationModel.from_pretrained(config["text_bison_model"]) - def generate_response(self, PROMPT: str) -> str: + def generate_response(self, prompt: str) -> str: """ Generate a response using the Text Bison model. 
@@ -73,5 +73,5 @@ def generate_response(self, PROMPT: str) -> str:
         """
         print("running tb.generate_response")
         parameters = self.parameters
-        response = self.model.predict(PROMPT, **parameters)
+        response = self.model.predict(prompt, **parameters)
         return response.text
diff --git a/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/agents/search_agent/utils/text_bison.py b/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/agents/search_agent/utils/text_bison.py
index f4499043d2..0ebfb3e075 100644
--- a/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/agents/search_agent/utils/text_bison.py
+++ b/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/agents/search_agent/utils/text_bison.py
@@ -15,10 +15,18 @@
 
 
 class TextBison:
+    """
+    Initializes the TextBison class for text generation.
+
+    Args:
+        project_id: GCP Project ID.
+        location: GCP Region. Defaults to "us-central1".
+    """
+
     def __init__(
         self,
-        PROJECT_ID=config["PROJECT_ID"],
-        LOCATION=config["LOCATION"],
+        project_id=config["PROJECT_ID"],
+        location=config["LOCATION"],
         max_output_tokens=2048,
         temperature=0.05,
         top_p=0.8,
@@ -27,15 +35,15 @@ def __init__(
         """Initializes the TextBison class.
 
         Args:
-            PROJECT_ID (str): The Google Cloud project ID.
-            LOCATION (str): The Google Cloud region where the model is deployed.
+            project_id (str): The Google Cloud project ID.
+            location (str): The Google Cloud region where the model is deployed.
             max_output_tokens (int): The maximum number of tokens to generate.
             temperature (float): The temperature to use for sampling.
             top_p (float): The top-p value to use for sampling.
             top_k (int): The top-k value to use for sampling.
         """
-        self.PROJECT_ID = PROJECT_ID
-        self.LOCATION = LOCATION
+        self.project_id = project_id
+        self.location = location
         self.parameters = {
             "max_output_tokens": max_output_tokens,
             "temperature": temperature,
@@ -43,11 +51,11 @@ def __init__(
             "top_k": top_k,
         }
 
-        vertexai.init(project=self.PROJECT_ID, location=self.LOCATION)
+        vertexai.init(project=self.project_id, location=self.location)
 
         self.model = TextGenerationModel.from_pretrained(config["text_bison_model"])
 
-    def generate_response(self, PROMPT):
+    def generate_response(self, prompt):
         """Generates a response to a given PROMPT.
 
         Args:
@@ -57,5 +65,5 @@ def generate_response(self, PROMPT):
             str: The generated response.
""" parameters = self.parameters - response = self.model.predict(PROMPT, **parameters) + response = self.model.predict(prompt, **parameters) return response.text diff --git a/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/dataframe_answer.py b/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/dataframe_answer.py index 802749b030..0fbbbf49ad 100644 --- a/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/dataframe_answer.py +++ b/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/dataframe_answer.py @@ -54,8 +54,8 @@ def generate_answer(question: str) -> str: with open("data/likes.json", "rb") as f: df2 = pd.DataFrame(json.load(f)) - PROMPT = SQL_PROMPT.format(question=question) - answer = tb.generate_response(PROMPT=PROMPT) + prompt = SQL_PROMPT.format(question=question) + answer = tb.generate_response(prompt=prompt) answer = answer.replace("", "") sql_query = answer.replace("", "") sql_query = sql_query.strip() @@ -65,8 +65,8 @@ def generate_answer(question: str) -> str: answer_df = ps.sqldf(sql_query, locals()) print(answer_df) temp_df = answer_df.astype(str) - PROMPT = FINAL_ANSWER_PROMPT.format(question=question, df=temp_df) - answer_natural_language = tb.generate_response(PROMPT) + prompt = FINAL_ANSWER_PROMPT.format(question=question, df=temp_df) + answer_natural_language = tb.generate_response(prompt) print("answer_natural_language : ", answer_natural_language) return answer_natural_language diff --git a/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/orchestration_engine.py b/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/orchestration_engine.py index b203273d16..e86415363e 100644 --- a/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/orchestration_engine.py +++ b/gemini/sample-apps/agent-assist/backend/src/chatbot_dir/orchestration_engine.py @@ -72,7 +72,7 @@ def create_sales_pitch(prompt: str, policy_name: str) -> str: return response -def generate_email(PROMPT: str, chat_history: str) -> tuple[str, str]: +def generate_email(prompt: str, chat_history: str) -> tuple[str, str]: """Generate email function to handle queries related to generating emails. Args: @@ -82,7 +82,7 @@ def generate_email(PROMPT: str, chat_history: str) -> tuple[str, str]: Returns: tuple[str, str]: A tuple containing the email subject and body. """ - return mail_component(query=PROMPT, chat_history=chat_history) + return mail_component(query=prompt, chat_history=chat_history) def send_email(email_id: str, subject: str, body: str) -> None: diff --git a/gemini/sample-apps/agent-assist/backend/src/utils/cal.py b/gemini/sample-apps/agent-assist/backend/src/utils/cal.py index 19a4a2271d..4a3d53ef23 100644 --- a/gemini/sample-apps/agent-assist/backend/src/utils/cal.py +++ b/gemini/sample-apps/agent-assist/backend/src/utils/cal.py @@ -24,18 +24,18 @@ def __init__(self): Initializes the Calendar class. 
""" self.self_email = config["company_email"] - self.SCOPES = [config["CALENDAR_SCOPE"]] + self.scopes = [config["CALENDAR_SCOPE"]] self.creds = None if os.path.exists("cal_token.json"): self.creds = Credentials.from_authorized_user_file( - "cal_token.json", self.SCOPES + "cal_token.json", self.scopes ) if not self.creds or not self.creds.valid: if self.creds and self.creds.expired and self.creds.refresh_token: self.creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( - "keys/credentials_desktop.json", self.SCOPES + "keys/credentials_desktop.json", self.scopes ) self.creds = flow.run_local_server(port=0) with open("cal_token.json", "w") as token: @@ -43,7 +43,7 @@ def __init__(self): try: self.service = build("calendar", "v3", credentials=self.creds) except HttpError as error: - print("An error occurred: %s" % error) + print(f"An error occurred: {error}") def create_event( self, email: list[str], start_date_time: str, end_date_time: str @@ -97,7 +97,7 @@ def create_event( .insert(calendarId="primary", body=event, sendUpdates="all") .execute() ) - print("Event created: %s" % (event.get("htmlLink"))) + print(f"Event created: {event.get('htmlLink')}") # print(event) return event diff --git a/gemini/sample-apps/agent-assist/backend/src/utils/gemini_text.py b/gemini/sample-apps/agent-assist/backend/src/utils/gemini_text.py index eb77fc4e2f..6f0eea573f 100644 --- a/gemini/sample-apps/agent-assist/backend/src/utils/gemini_text.py +++ b/gemini/sample-apps/agent-assist/backend/src/utils/gemini_text.py @@ -19,8 +19,8 @@ class GeminiText: A class to interact with the Gemini text generation model from Vertex AI. Args: - PROJECT_ID (str): The Google Cloud project ID. - LOCATION (str): The Google Cloud region where the model is deployed. + project_id (str): The Google Cloud project ID. + location (str): The Google Cloud region where the model is deployed. max_output_tokens (int): The maximum number of tokens to generate. temperature (float): The temperature to use for sampling. top_p (float): The top-p value to use for sampling. @@ -29,8 +29,8 @@ class GeminiText: def __init__( self, - PROJECT_ID=config["PROJECT_ID"], - LOCATION=config["LOCATION"], + project_id=config["PROJECT_ID"], + location=config["LOCATION"], max_output_tokens=2048, temperature=0, top_p=0.8, @@ -40,15 +40,15 @@ def __init__( Initializes the class. Args: - PROJECT_ID (str): The Google Cloud project ID. - LOCATION (str): The Google Cloud region where the model is deployed. + project_id (str): The Google Cloud project ID. + location (str): The Google Cloud region where the model is deployed. max_output_tokens (int): The maximum number of tokens to generate. temperature (float): The temperature to use for sampling. top_p (float): The top-p value to use for sampling. top_k (int): The top-k value to use for sampling. """ - self.PROJECT_ID = PROJECT_ID - self.LOCATION = LOCATION + self.project_id = project_id + self.location = location self.parameters = { "max_output_tokens": max_output_tokens, "temperature": temperature, @@ -56,12 +56,12 @@ def __init__( "top_k": top_k, } - vertexai.init(project=self.PROJECT_ID, location=self.LOCATION) + vertexai.init(project=self.project_id, location=self.location) self.model = GenerativeModel(config["gemini_model"]) self.chat = self.model.start_chat() - def generate_response(self, PROMPT: str) -> str: + def generate_response(self, prompt: str) -> str: """ Generates a response to a given PROMPT. 
@@ -74,7 +74,7 @@ def generate_response(self, PROMPT: str) -> str:
         print("running tb.generate_response")
         parameters = self.parameters
         # response =self.model.predict(PROMPT,**parameters)
-        response = self.chat.send_message(PROMPT, generation_config=parameters)
+        response = self.chat.send_message(prompt, generation_config=parameters)
         return response.text
 
 
diff --git a/gemini/sample-apps/agent-assist/backend/src/utils/get_users.py b/gemini/sample-apps/agent-assist/backend/src/utils/get_users.py
index 53d6929814..9ac044b867 100644
--- a/gemini/sample-apps/agent-assist/backend/src/utils/get_users.py
+++ b/gemini/sample-apps/agent-assist/backend/src/utils/get_users.py
@@ -5,11 +5,11 @@
 import json
 
 
-def get_users(isContact: bool = True) -> list:
+def get_users(is_contact: bool = True) -> list:
     """Gets a list of users from a JSON file.
 
     Args:
-        isContact: A boolean value indicating whether to return users who have been contacted or not.
+        is_contact: A boolean value indicating whether to return users who have been contacted or not.
 
     Returns:
         A list of dictionaries representing users.
@@ -18,12 +18,12 @@ def get_users(isContact: bool = True) -> list:
     with open("data/real_users_db.json") as f:
         users = json.load(f)
 
-    if isContact:
+    if is_contact:
         contacted_users = list(filter(lambda x: x["LastContacted"] is not None, users))
         return contacted_users
-    else:
-        potential_users = list(filter(lambda x: x["LastContacted"] is None, users))
-        return potential_users
+
+    potential_users = list(filter(lambda x: x["LastContacted"] is None, users))
+    return potential_users
 
 
 if __name__ == "__main__":
diff --git a/gemini/sample-apps/agent-assist/backend/src/utils/mail.py b/gemini/sample-apps/agent-assist/backend/src/utils/mail.py
index 5bf6fd9347..0aef8464d1 100644
--- a/gemini/sample-apps/agent-assist/backend/src/utils/mail.py
+++ b/gemini/sample-apps/agent-assist/backend/src/utils/mail.py
@@ -23,9 +23,7 @@ class Mail:
     Class to send and receive emails.
     """
 
-    def __init__(
-        self, sender=config["company_email"], password=config["mail_password"]
-    ):
+    def __init__(self, sender=config["company_email"], password=config["mail_password"]):
         """
         Initializes the Mail class.
 
@@ -59,9 +57,7 @@ def send_email(self, to_mail, subject, body, file_path: Any = None) -> None:
         with open(file_path, "rb") as fil:
             part = MIMEApplication(fil.read(), Name=basename(file_path))
             # After the file is closed
-            part[
-                "Content-Disposition"
-            ] = f'attachment; filename="{basename(file_path)}"'
+            part["Content-Disposition"] = f'attachment; filename="{basename(file_path)}"'
             msg.attach(part)
 
         server = smtplib.SMTP("smtp.gmail.com", 587)
@@ -109,9 +105,7 @@ def send_calendar_event(self, param: dict) -> None:
         except Exception as e:
             print(e)
 
-        __location__ = os.path.realpath(
-            os.path.join(os.getcwd(), os.path.dirname(__file__))
-        )
+        __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
         f = os.path.join(__location__, "invite.ics")
 
         with open(f) as file:
@@ -124,9 +118,7 @@ def send_calendar_event(self, param: dict) -> None:
         replaced_contents = replaced_contents.replace(
             "end_date", param["end_date"].strftime("%Y%m%dT%H%M%SZ")
         )
-        replaced_contents = replaced_contents.replace(
-            "telephonic", param["location"]
-        )
+        replaced_contents = replaced_contents.replace("telephonic", param["location"])
         replaced_contents = replaced_contents.replace(
             "now", datetime.datetime.now().strftime("%Y%m%dT%H%M%SZ")
         )
diff --git a/gemini/sample-apps/agent-assist/backend/src/utils/mail_trial.py b/gemini/sample-apps/agent-assist/backend/src/utils/mail_trial.py
index b903a38663..b1a3d601df 100644
--- a/gemini/sample-apps/agent-assist/backend/src/utils/mail_trial.py
+++ b/gemini/sample-apps/agent-assist/backend/src/utils/mail_trial.py
@@ -13,17 +13,20 @@
 
 
 def show_chatty_threads():
-    SCOPES = [config["MAIL_TRIAL_SCOPE"]]
+    """Shows basic usage of the Gmail API.
+    Prints the threads in the user's mailbox.
+    """
+    scopes = [config["MAIL_TRIAL_SCOPE"]]
 
     creds = None
     if os.path.exists("mail_token.json"):
-        creds = Credentials.from_authorized_user_file("mail_token.json", SCOPES)
+        creds = Credentials.from_authorized_user_file("mail_token.json", scopes)
     if not creds or not creds.valid:
         if creds and creds.expired and creds.refresh_token:
             creds.refresh(Request())
         else:
             flow = InstalledAppFlow.from_client_secrets_file(
-                "keys/read_mail.json", SCOPES
+                "keys/read_mail.json", scopes
             )
             creds = flow.run_local_server(port=0)
             with open("mail_token.json", "w") as token:
@@ -40,6 +43,7 @@ def show_chatty_threads():
     except HttpError as error:
         print(f"An error occurred: {error}")
 
+    return None
 
 
 if __name__ == "__main__":
diff --git a/gemini/sample-apps/agent-assist/backend/src/utils/text_bison.py b/gemini/sample-apps/agent-assist/backend/src/utils/text_bison.py
index e61b438c8b..733dcf9165 100644
--- a/gemini/sample-apps/agent-assist/backend/src/utils/text_bison.py
+++ b/gemini/sample-apps/agent-assist/backend/src/utils/text_bison.py
@@ -18,26 +18,26 @@ class TextBison:
     def __init__(
         self,
-        PROJECT_ID=config["PROJECT_ID"],  # GCP Project ID
-        LOCATION=config["LOCATION"],  # GCP Region
+        project_id=config["PROJECT_ID"],  # GCP Project ID
+        location=config["LOCATION"],  # GCP Region
     ):
         """
         Initializes the TextBison class for text generation.
 
         Args:
-            PROJECT_ID: GCP Project ID.
-            LOCATION: GCP Region. Defaults to "us-central1".
+            project_id: GCP Project ID.
+            location: GCP Region. Defaults to "us-central1".
""" - self.PROJECT_ID = PROJECT_ID - self.LOCATION = LOCATION + self.project_id = project_id + self.location = location self.parameters = config["text_bison_parameters"] # Initialize the Vertex AI client library - vertexai.init(project=self.PROJECT_ID, location=self.LOCATION) + vertexai.init(project=self.project_id, location=self.location) # Load the pre-trained Text-Bison model self.model = TextGenerationModel.from_pretrained("text-bison") - def generate_response(self, PROMPT: str) -> str: + def generate_response(self, prompt: str) -> str: """ Generates a text response using the Text-Bison model. @@ -49,5 +49,5 @@ def generate_response(self, PROMPT: str) -> str: """ print("running tb.generate_response") parameters = self.parameters - response = self.model.predict(PROMPT, **parameters) + response = self.model.predict(prompt, **parameters) return response.text