diff --git a/scripts/demo/question_answering/README.md b/demo/question_answering/README.md
similarity index 100%
rename from scripts/demo/question_answering/README.md
rename to demo/question_answering/README.md
diff --git a/demo/question_answering/qa_config.yaml b/demo/question_answering/qa_config.yaml
new file mode 100644
index 00000000..6da50b40
--- /dev/null
+++ b/demo/question_answering/qa_config.yaml
@@ -0,0 +1,44 @@
+# This is the configuration file for the question answering agent. It follows the same syntax as the YAML configs used by Hydra.
+# You can read more about Hydra configuration here: https://hydra.cc/docs/intro/
+# The most basic config is the qa_agent. The _target_ key specifies the class that will be used to create the agent instance.
+# For attributes that can be used for the agent, you can refer to the API documentation.
+
+shared_memory:
+  _target_: sherpa_ai.memory.shared_memory.SharedMemory # The absolute path to the shared memory class in the library
+  objective: Answer the question # Objective for the agent; since this is a question answering agent, the objective is to answer questions
+
+llm: # Configuration for the LLM; here we are using the OpenAI GPT-3.5-turbo model
+  _target_: langchain.chat_models.ChatOpenAI
+  model_name: gpt-3.5-turbo
+  temperature: 0
+
+agent_config: # For the demo, the default configuration is used. You can change it as needed
+  _target_: sherpa_ai.config.task_config.AgentConfig
+
+citation_validation: # The tool used to validate the answer and add citations to it
+  _target_: sherpa_ai.output_parsers.citation_validation.CitationValidation
+  sequence_threshold: 0.5
+  jaccard_threshold: 0.5
+  token_overlap: 0.5
+
+google_search:
+  _target_: sherpa_ai.actions.GoogleSearch
+  role_description: Act as a question answering agent
+  task: Question answering
+  llm: ${llm}
+  include_metadata: true
+  config: ${agent_config}
+
+qa_agent:
+  _target_: sherpa_ai.agents.qa_agent.QAAgent
+  llm: ${llm}
+  shared_memory: ${shared_memory}
+  name: QA Sherpa
+  description: You are a researcher for natural language processing, searching for answers to questions. Do not answer any question not related to NLP
+  agent_config: ${agent_config}
+  num_runs: 1
+  validation_steps: 1
+  actions:
+    - ${google_search}
+  validations:
+    - ${citation_validation}
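For reference (not part of the diff): the comments above describe how the `_target_` keys drive object creation. A minimal sketch of how such a Hydra-style config can be turned into a live agent, assuming `hydra-core`/`omegaconf` are installed and an OpenAI key is configured; the demo's `get_qa_agent_from_config_file` presumably wraps equivalent logic:

```python
from hydra.utils import instantiate
from omegaconf import OmegaConf

# Load the YAML; ${...} interpolations (e.g. ${llm}) resolve against the config root.
config = OmegaConf.load("qa_config.yaml")

# instantiate() recursively builds every node that carries a _target_ key, so this
# returns a QAAgent already wired to its LLM, Google search action, and citation validation.
qa_agent = instantiate(config.qa_agent)
```

Because `${llm}`, `${shared_memory}`, and the action/validation entries are interpolations into the same file, instantiating `qa_agent` pulls in and builds those nodes as well.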
diff --git a/scripts/demo/question_answering/demo.py b/demo/question_answering/qa_demo.py
similarity index 68%
rename from scripts/demo/question_answering/demo.py
rename to demo/question_answering/qa_demo.py
index c1be6432..23a9850a 100644
--- a/scripts/demo/question_answering/demo.py
+++ b/demo/question_answering/qa_demo.py
@@ -8,14 +8,16 @@
 if __name__ == "__main__":
     parser = ArgumentParser()
-    parser.add_argument("--config", type=str)
+    parser.add_argument("--config", type=str, default="qa_config.yaml")
     args = parser.parse_args()
 
     qa_agent = get_qa_agent_from_config_file(args.config)
-
     while True:
         question = input("Ask me a question: ")
+
+        # Add the question to the shared memory. By default, the agent will take the last
+        # message in the shared memory as the task.
         qa_agent.shared_memory.add(EventType.task, "human", question)
 
         result = qa_agent.run()
 
-        print(result)
\ No newline at end of file
+        print(result)
diff --git a/scripts/demo/software_modeler/README.md b/demo/software_modeler/README.md
similarity index 100%
rename from scripts/demo/software_modeler/README.md
rename to demo/software_modeler/README.md
diff --git a/scripts/demo/software_modeler/modeler.yaml b/demo/software_modeler/modeler_config.yaml
similarity index 89%
rename from scripts/demo/software_modeler/modeler.yaml
rename to demo/software_modeler/modeler_config.yaml
index 94ede131..f7764a14 100644
--- a/scripts/demo/software_modeler/modeler.yaml
+++ b/demo/software_modeler/modeler_config.yaml
@@ -1,6 +1,6 @@
 shared_memory:
   _target_: sherpa_ai.memory.shared_memory.SharedMemory
-  objective: Answer the question
+  objective: Generate a domain model from the user's description
 
 user_id: none
 team_id: none
@@ -21,7 +21,8 @@ deliberation:
   role_description: You are a software modeler capable of creating a domain model from the user's description. Output the result in Umple code.
   llm: ${llm}
 
-output_file:
+# The following customized actions are defined in verifiers.py
+output_file:
   _target_: verifiers.OutputModel
   filename: model.ump
 
@@ -29,7 +30,7 @@ generation:
   _target_: verifiers.UmpleGeneration
 
-qa_agent:
+qa_agent: # QA Agent customized to perform software modeling with custom prompts
  _target_: sherpa_ai.agents.qa_agent.QAAgent
   llm: ${llm}
   shared_memory: ${shared_memory}
 
diff --git a/scripts/demo/software_modeler/demo.py b/demo/software_modeler/modeler_demo.py
similarity index 65%
rename from scripts/demo/software_modeler/demo.py
rename to demo/software_modeler/modeler_demo.py
index 3be5f0b5..ab6a19d3 100644
--- a/scripts/demo/software_modeler/demo.py
+++ b/demo/software_modeler/modeler_demo.py
@@ -9,16 +9,22 @@ if __name__ == "__main__":
     parser = ArgumentParser()
     parser.add_argument("--config", type=str)
-    parser.add_argument("--desc", type=str, help="Path to a text file containing the domain description")
+    parser.add_argument(
+        "--desc", type=str, help="Path to a text file containing the domain description"
+    )
     args = parser.parse_args()
 
     qa_agent = get_qa_agent_from_config_file(args.config)
     print(f"Generating domain model for {args.desc}")
-    
+
     with open(args.desc, "r") as f:
         domain_desc = f.read()
 
-    qa_agent.shared_memory.add(EventType.task, "human", f"Generate domain model the following description:\n {domain_desc}")
+    qa_agent.shared_memory.add(
+        EventType.task,
+        "human",
+        f"Generate a domain model for the following description:\n {domain_desc}",
+    )
 
     result = qa_agent.run()
 
-    print(result)
\ No newline at end of file
+    print(result)
diff --git a/demo/software_modeler/umple.jar b/demo/software_modeler/umple.jar
new file mode 100644
index 00000000..b0141a58
Binary files /dev/null and b/demo/software_modeler/umple.jar differ
diff --git a/scripts/demo/software_modeler/verifiers.py b/demo/software_modeler/verifiers.py
similarity index 58%
rename from scripts/demo/software_modeler/verifiers.py
rename to demo/software_modeler/verifiers.py
index 5ad9995b..23dd7eab 100644
--- a/scripts/demo/software_modeler/verifiers.py
+++ b/demo/software_modeler/verifiers.py
@@ -7,10 +7,28 @@
 
 
 class OutputModel(BaseOutputProcessor):
+    """
+    Validate the generated model and save it to a file
+
+    Attributes:
+        filename: The name of the file to save the model
+    """
+
     def __init__(self, filename: str):
         self.filename = filename
 
     def process_output(self, text: str, belief: Belief) -> ValidationResult:
+        """
+        Filter the generated text to extract the Umple model and save it to a file
+
+        Args:
+            text: The generated text
+            belief: The belief of the agent
+
+        Returns:
+            ValidationResult: The result of the validation. Since the main purpose is to save the model,
+            the result is always valid
+        """
         lines = text.split("\n")
         line_num = 0
         print(text)
@@ -45,17 +63,39 @@ def get_failure_message(self) -> str:
 
 
 class UmpleGeneration(BaseOutputProcessor):
-    def __init__(self, umple_process="umple.jar"):
-        self.umple_process = umple_process
-        self.fail_count=0
+    """
+    Use Umple to validate the model and generate a class diagram
+
+    Attributes:
+        umple_path: The path to the Umple jar file
+        fail_count: The number of times the model generation failed
+        last_error: The last error message received
+    """
+
+    def __init__(self, umple_path="umple.jar"):
+        self.umple_path = umple_path
+        self.fail_count = 0
         self.last_error = ""
 
     def process_output(self, text: str, belief: Belief) -> ValidationResult:
+        """
+        Validate the model and generate a class diagram
+
+        Args:
+            text: The generated model in Umple format
+            belief: The belief of the agent
+
+        Returns:
+            ValidationResult: The result of the validation; true if the model text passes Umple validation and can be
+            used to generate a class diagram, false otherwise
+        """
         if self.fail_count >= 3:
-            input(f"Unable to fix the model. Please help me to fix the intermediate representation. Last error received: \n {self.last_error} \n Press Enter to continue...")
+            input(
+                f"Unable to fix the model. Please help me to fix the intermediate representation. Last error received: \n {self.last_error} \n Press Enter to continue..."
+            )
 
         result = subprocess.run(
-            ["java", "-jar", self.umple_process, "-g", "java", "model.ump"],
+            ["java", "-jar", self.umple_path, "-g", "java", "model.ump"],
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE,
             text=True,
@@ -74,7 +114,7 @@ def process_output(self, text: str, belief: Belief) -> ValidationResult:
 
         # generate Class Diagram
         subprocess.check_output(
-            ["java", "-jar", self.umple_process, "-g", "GvClassDiagram", "model.ump"],
+            ["java", "-jar", self.umple_path, "-g", "GvClassDiagram", "model.ump"],
         )
 
         subprocess.check_output(
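Aside (not part of the diff): `UmpleGeneration` shells out to the bundled `umple.jar`, so the modeler demo needs Java available on the PATH. A minimal sketch for checking that setup from `demo/software_modeler/`, reusing the same commands the verifier runs; the sample model text and the exit-code check are illustrative assumptions:

```python
import subprocess

# Write a trivial Umple model (illustrative content only).
with open("model.ump", "w") as f:
    f.write("class Student {\n  name;\n}\n")

# Same invocation UmpleGeneration.process_output uses to validate the generated model.
result = subprocess.run(
    ["java", "-jar", "umple.jar", "-g", "java", "model.ump"],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    text=True,
)

# Assumption: a failed compile is signalled by a non-zero exit code / output on stderr.
if result.returncode == 0:
    # Same invocation the verifier uses to render a GraphViz class diagram.
    subprocess.check_output(
        ["java", "-jar", "umple.jar", "-g", "GvClassDiagram", "model.ump"]
    )
    print("umple.jar looks usable")
else:
    print("Umple reported errors:\n" + result.stderr)
```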
diff --git a/scripts/demo/question_answering/qa.yaml b/scripts/demo/question_answering/qa.yaml
deleted file mode 100644
index e3c9df82..00000000
--- a/scripts/demo/question_answering/qa.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-shared_memory:
-  _target_: sherpa_ai.memory.shared_memory.SharedMemory
-  objective: Answer the question
-
-user_id: none
-team_id: none
-
-llm:
-  _target_: sherpa_ai.models.sherpa_base_chat_model.SherpaChatOpenAI
-  model_name: gpt-3.5-turbo
-  temperature: 0
-  user_id: ${user_id}
-  team_id: ${team_id}
-
-agent_config:
-  _target_: sherpa_ai.config.task_config.AgentConfig
-
-citation_validation:
-  _target_: sherpa_ai.output_parsers.citation_validation.CitationValidation
-  sequence_threshold: 0.5
-  jaccard_threshold: 0.5
-  token_overlap: 0.5
-
-google_search:
-  _target_: sherpa_ai.actions.GoogleSearch
-  role_description: Act as a question answering agent
-  task: Question answering
-  llm: ${llm}
-  include_metadata: true
-  config: ${agent_config}
-
-qa_agent:
-  _target_: sherpa_ai.agents.qa_agent.QAAgent
-  llm: ${llm}
-  shared_memory: ${shared_memory}
-  name: QA Sherpa
-  description: You are a research for natural language processing question for answers to questions. Do not answering any question not related to NLP
-  agent_config: ${agent_config}
-  num_runs: 1
-  validation_steps: 1
-  actions:
-    - ${google_search}
-  validations:
-    - ${citation_validation}