Skip to content

Commit

Permalink
Improve instruction
Browse files Browse the repository at this point in the history
  • Loading branch information
20001LastOrder committed Mar 19, 2024
1 parent 79d0e7a commit bff1763
Show file tree
Hide file tree
Showing 9 changed files with 109 additions and 60 deletions.
File renamed without changes.
44 changes: 44 additions & 0 deletions demo/question_answering/qa_config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
# This is the configuration file for the question answering agent. It follows the same syntax as the YAML used for Hydra.
# You can read more about Hydra configuration here: https://hydra.cc/docs/intro/
# The most basic config is the qa_agent. The _target_ key is used to specify the class that will be used to create the agent instance.
# For attributes that can be used for the agent, you can refer to the API documentation.

shared_memory:
_target_: sherpa_ai.memory.shared_memory.SharedMemory # The absolute path to the share memory class in the library
objective: Answer the question # Objective for the agent, since this is a question answering agent, the objective is to answer questions

llm: # Configuration for the llm, here we are using the OpenAI GPT-3.5-turbo model
_target_: langchain.chat_models.ChatOpenAI
model_name: gpt-3.5-turbo
temperature: 0

agent_config: # For the demo, default configuration is used. You can change the configuration as per your requirement
_target_: sherpa_ai.config.task_config.AgentConfig

citation_validation: # The tool used to validate and add citation to the answer
_target_: sherpa_ai.output_parsers.citation_validation.CitationValidation
sequence_threshold: 0.5
jaccard_threshold: 0.5
token_overlap: 0.5

google_search:
_target_: sherpa_ai.actions.GoogleSearch
role_description: Act as a question answering agent
task: Question answering
llm: ${llm}
include_metadata: true
config: ${agent_config}

qa_agent:
_target_: sherpa_ai.agents.qa_agent.QAAgent
llm: ${llm}
shared_memory: ${shared_memory}
name: QA Sherpa
  description: You are a researcher answering questions about natural language processing. Do not answer any question that is not related to NLP
agent_config: ${agent_config}
num_runs: 1
validation_steps: 1
actions:
- ${google_search}
validations:
- ${citation_validation}
Original file line number Diff line number Diff line change
Expand Up @@ -8,14 +8,16 @@

if __name__ == "__main__":
    parser = ArgumentParser()
    # Path to the Hydra-style YAML config that describes the QA agent;
    # defaults to the demo config shipped alongside this script.
    parser.add_argument("--config", type=str, default="qa_config.yaml")
    args = parser.parse_args()

    # Instantiate the agent (LLM, shared memory, actions, validations)
    # from the YAML configuration file.
    qa_agent = get_qa_agent_from_config_file(args.config)

    # Simple interactive REPL: read a question, run the agent, print the answer.
    while True:
        question = input("Ask me a question: ")

        # Add the question to the shared memory. By default, the agent will take
        # the last message in the shared memory as the task.
        qa_agent.shared_memory.add(EventType.task, "human", question)
        result = qa_agent.run()
        print(result)
File renamed without changes.
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
shared_memory:
_target_: sherpa_ai.memory.shared_memory.SharedMemory
objective: Answer the question
objective: Generate a domain model from the user's description

user_id: none
team_id: none
Expand All @@ -21,15 +21,16 @@ deliberation:
role_description: You are a software modeler capable of creating a domain model from the user's description. Output the result in Umple code.
llm: ${llm}

output_file:
# The following customized actions are defined in verifiers.py
output_file:
_target_: verifiers.OutputModel
filename: model.ump

generation:
_target_: verifiers.UmpleGeneration


qa_agent:
qa_agent: # QA Agent customized to perform software modeling with custom prompts
_target_: sherpa_ai.agents.qa_agent.QAAgent
llm: ${llm}
shared_memory: ${shared_memory}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,16 +9,22 @@
if __name__ == "__main__":
    parser = ArgumentParser()
    # Path to the Hydra-style YAML config that describes the modeling agent.
    parser.add_argument("--config", type=str)
    parser.add_argument(
        "--desc", type=str, help="Path to a text file containing the domain description"
    )
    args = parser.parse_args()

    # Instantiate the agent (LLM, shared memory, actions, validations)
    # from the YAML configuration file.
    qa_agent = get_qa_agent_from_config_file(args.config)

    print(f"Generating domain model for {args.desc}")

    with open(args.desc, "r") as f:
        domain_desc = f.read()

    # Add the task to the shared memory; the agent takes the last message
    # in shared memory as its task. (Fixed wording of the prompt, which was
    # missing "a"/"from": "Generate domain model the following description".)
    qa_agent.shared_memory.add(
        EventType.task,
        "human",
        f"Generate a domain model from the following description:\n {domain_desc}",
    )
    result = qa_agent.run()
    print(result)
Binary file added demo/software_modeler/umple.jar
Binary file not shown.
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,28 @@


class OutputModel(BaseOutputProcessor):
    """
    Validate the generated model and save it to a file
    Attributes:
        filename: The name of the file to save the model
    """

    def __init__(self, filename: str):
        # Target file for the extracted Umple model
        # (the demo config passes "model.ump").
        self.filename = filename

    def process_output(self, text: str, belief: Belief) -> ValidationResult:
        """
        Filtering the generated text to extract the Umple model and save it to a file
        Args:
            text: The generated text
            belief: The belief of the agent
        Returns:
            ValidationResult: The result of the validation, since the main purpose is to save the model,
            the result is always valid
        """
        # Scan the generated text line by line to locate the Umple model block.
        lines = text.split("\n")
        line_num = 0
        # NOTE(review): looks like a leftover debug print of the raw LLM output —
        # consider removing or switching to logging.
        print(text)
Expand Down Expand Up @@ -45,17 +63,39 @@ def get_failure_message(self) -> str:


class UmpleGeneration(BaseOutputProcessor):
def __init__(self, umple_process="umple.jar"):
self.umple_process = umple_process
self.fail_count=0
"""
Use Umple to validate the model and generate a class diagram
Attributes:
umple_path: The path to the umple jar file
fail_count: The number of times the model generation failed
last_error: The last error message received
"""

def __init__(self, umple_path="umple.jar"):
self.umple_path = umple_path
self.fail_count = 0
self.last_error = ""

def process_output(self, text: str, belief: Belief) -> ValidationResult:
"""
Validate the model and generate a class diagram
Args:
text: The generated model in Umple format
belief: The belief of the agent
Returns:
ValidationResult: The result of the validation, true if model text passes Umple validation and is able to used
to generate a class diagram, false otherwise
"""
if self.fail_count >= 3:
input(f"Unable to fix the model. Please help me to fix the intermediate representation. Last error received: \n {self.last_error} \n Press Enter to continue...")
input(
f"Unable to fix the model. Please help me to fix the intermediate representation. Last error received: \n {self.last_error} \n Press Enter to continue..."
)

result = subprocess.run(
["java", "-jar", self.umple_process, "-g", "java", "model.ump"],
["java", "-jar", self.umple_path, "-g", "java", "model.ump"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
Expand All @@ -74,7 +114,7 @@ def process_output(self, text: str, belief: Belief) -> ValidationResult:

# generate Class Diagram
subprocess.check_output(
["java", "-jar", self.umple_process, "-g", "GvClassDiagram", "model.ump"],
["java", "-jar", self.umple_path, "-g", "GvClassDiagram", "model.ump"],
)

subprocess.check_output(
Expand Down
44 changes: 0 additions & 44 deletions scripts/demo/question_answering/qa.yaml

This file was deleted.

0 comments on commit bff1763

Please sign in to comment.