From e8e97eaeeea77fba91b401c3cf5d7ea347d7493a Mon Sep 17 00:00:00 2001
From: vyokky <7678676@qq.com>
Date: Fri, 28 Jun 2024 21:21:18 +0800
Subject: [PATCH 01/21] documentation
---
documents/docs/project/round.md | 2 +-
ufo/module/basic.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/documents/docs/project/round.md b/documents/docs/project/round.md
index 3cd5454b..3f53f7be 100644
--- a/documents/docs/project/round.md
+++ b/documents/docs/project/round.md
@@ -29,7 +29,7 @@ def run(self) -> None:
# If the subtask ends, capture the last snapshot of the application.
if self.state.is_subtask_end():
- time.sleep(3)
+ time.sleep(configs["SLEEP_TIME"])
self.capture_last_snapshot(sub_round_id=self.subtask_amount)
self.subtask_amount += 1
diff --git a/ufo/module/basic.py b/ufo/module/basic.py
index a89da776..adb5fe1c 100644
--- a/ufo/module/basic.py
+++ b/ufo/module/basic.py
@@ -109,7 +109,7 @@ def run(self) -> None:
# If the subtask ends, capture the last snapshot of the application.
if self.state.is_subtask_end():
- time.sleep(3)
+ time.sleep(configs["SLEEP_TIME"])
self.capture_last_snapshot(sub_round_id=self.subtask_amount)
self.subtask_amount += 1
From a12bdc1357073672ec135ad10f69bf2c38ba3e42 Mon Sep 17 00:00:00 2001
From: vyokky <7678676@qq.com>
Date: Fri, 28 Jun 2024 21:31:42 +0800
Subject: [PATCH 02/21] doc
---
documents/docs/supported_models/overview.md | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/documents/docs/supported_models/overview.md b/documents/docs/supported_models/overview.md
index 1fab7746..d38c7fb7 100644
--- a/documents/docs/supported_models/overview.md
+++ b/documents/docs/supported_models/overview.md
@@ -11,4 +11,8 @@ Please refer to the following sections for more information on the supported mod
| `Gemini` | [Gemini API](./gemini.md) |
| `QWEN` | [QWEN API](./qwen.md) |
| `Ollama` | [Ollama API](./ollama.md) |
-| `Custom` | [Custom API](./custom_model.md) |
\ No newline at end of file
+| `Custom` | [Custom API](./custom_model.md) |
+
+
+!!! info
+ Each model is implemented as a separate class in the `ufo/llm` directory, and uses the functions `chat_completion` defined in the `BaseService` class of the `ufo/llm/base.py` file to obtain responses from the model.
\ No newline at end of file
From 3258f45a31e2765398d8cab0d3c8d2e70496a8d7 Mon Sep 17 00:00:00 2001
From: vyokky <7678676@qq.com>
Date: Fri, 28 Jun 2024 21:33:44 +0800
Subject: [PATCH 03/21] doc
---
documents/docs/{about => }/faq.md | 0
documents/mkdocs.yml | 2 +-
2 files changed, 1 insertion(+), 1 deletion(-)
rename documents/docs/{about => }/faq.md (100%)
diff --git a/documents/docs/about/faq.md b/documents/docs/faq.md
similarity index 100%
rename from documents/docs/about/faq.md
rename to documents/docs/faq.md
diff --git a/documents/mkdocs.yml b/documents/mkdocs.yml
index ca2a38cd..c5c8dcba 100644
--- a/documents/mkdocs.yml
+++ b/documents/mkdocs.yml
@@ -70,7 +70,7 @@ nav:
- License: about/LICENSE.md
- Disclaimer: about/DISCLAIMER.md
- Support: about/SUPPORT.md
- - FAQ: about/faq.md
+ - FAQ: faq.md
markdown_extensions:
- pymdownx.tasklist
From ef519f191fe4ec599c75f0cdeeb3805650fc0cf6 Mon Sep 17 00:00:00 2001
From: vyokky <7678676@qq.com>
Date: Fri, 28 Jun 2024 21:36:35 +0800
Subject: [PATCH 04/21] doc
---
documents/docs/faq.md | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/documents/docs/faq.md b/documents/docs/faq.md
index 8eae129d..e91291ca 100644
--- a/documents/docs/faq.md
+++ b/documents/docs/faq.md
@@ -27,4 +27,5 @@ A: It depends on the language model you are using. Most of LLMs support multiple
## Q8: It shows the error `Error making API request: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))` when I run UFO. What should I do?
A: This means the LLM endpoint is not accessible. You can check the network connection (e.g. VPN) and the status of the LLM endpoint.
-To get more support, please submit an issue on the [GitHub Issues](https://github.com/microsoft/UFO/issues), or send an email to [ufo-agent@microsoft.com](mailto:ufo-agent@microsoft.com).
\ No newline at end of file
+!!! tip
+ To get more support, please submit an issue on the [GitHub Issues](https://github.com/microsoft/UFO/issues), or send an email to [ufo-agent@microsoft.com](mailto:ufo-agent@microsoft.com).
\ No newline at end of file
From 0f587629eef215b2cc4e0555e8a6f5bd9d3e1cdc Mon Sep 17 00:00:00 2001
From: vyokky <7678676@qq.com>
Date: Mon, 1 Jul 2024 21:33:33 +0800
Subject: [PATCH 05/21] agent doc
---
README.md | 2 +-
documents/docs/agents/app_agent.md | 151 ++++++++++++++++++
documents/docs/agents/design/blackboard.md | 55 +++++++
documents/docs/agents/design/memory.md | 24 +++
documents/docs/agents/design/processor.md | 29 ++++
documents/docs/agents/design/prompter.md | 47 ++++++
documents/docs/agents/design/state.md | 120 ++++++++++++++
documents/docs/agents/evaluation_agent.md | 35 ++++
documents/docs/agents/follower_agent.md | 28 ++++
documents/docs/agents/host_agent.md | 140 ++++++++++++++++
documents/docs/agents/overview.md | 37 +++++
documents/docs/automator/overview.md | 0
.../configurations/developer_configuration.md | 4 +-
documents/docs/img/appagent.png | Bin 0 -> 192024 bytes
documents/docs/img/blackboard.png | Bin 0 -> 105351 bytes
documents/mkdocs.yml | 7 +-
ufo/prompter/basic.py | 15 ++
17 files changed, 686 insertions(+), 8 deletions(-)
create mode 100644 documents/docs/agents/app_agent.md
create mode 100644 documents/docs/agents/design/blackboard.md
create mode 100644 documents/docs/agents/design/memory.md
create mode 100644 documents/docs/agents/design/processor.md
create mode 100644 documents/docs/agents/design/prompter.md
create mode 100644 documents/docs/agents/design/state.md
create mode 100644 documents/docs/agents/evaluation_agent.md
create mode 100644 documents/docs/agents/follower_agent.md
create mode 100644 documents/docs/agents/host_agent.md
create mode 100644 documents/docs/automator/overview.md
create mode 100644 documents/docs/img/appagent.png
create mode 100644 documents/docs/img/blackboard.png
diff --git a/README.md b/README.md
index ac7c648a..d962e7a7 100644
--- a/README.md
+++ b/README.md
@@ -28,7 +28,7 @@
- AppAgent ๐พ, responsible for iteratively executing actions on the selected applications until the task is successfully concluded within a specific application.
- Control Interaction ๐ฎ, is tasked with translating actions from HostAgent and AppAgent into interactions with the application and its UI controls. It's essential that the targeted controls are compatible with the Windows **UI Automation** or **Win32** API.
-Both agents leverage the multi-modal capabilities of GPT-Vision to comprehend the application UI and fulfill the user's request. For more details, please consult our [technical report](https://arxiv.org/abs/2402.07939).
+Both agents leverage the multi-modal capabilities of GPT-Vision to comprehend the application UI and fulfill the user's request. For more details, please consult our [technical report](https://arxiv.org/abs/2402.07939) and [Documentation](https://microsoft.github.io/UFO/).
diff --git a/documents/docs/agents/app_agent.md b/documents/docs/agents/app_agent.md
new file mode 100644
index 00000000..c9934f3c
--- /dev/null
+++ b/documents/docs/agents/app_agent.md
@@ -0,0 +1,151 @@
+# AppAgent ๐พ
+
+An `AppAgent` is responsible for iteratively executing actions on the selected applications until the task is successfully concluded within a specific application. The `AppAgent` is created by the `HostAgent` to fulfill a sub-task within a `Round`. The `AppAgent` is responsible for executing the necessary actions within the application to fulfill the user's request. The `AppAgent` has the following features:
+
+1. **[ReAct](https://arxiv.org/abs/2210.03629) with the Application** - The `AppAgent` recursively interacts with the application in a workflow of observation->thought->action, leveraging the multi-modal capabilities of Visual Language Models (VLMs) to comprehend the application UI and fulfill the user's request.
+2. **Comprehension Enhancement** - The `AppAgent` is enhanced by Retrieval Augmented Generation (RAG) from heterogeneous sources, including external knowledge bases, and demonstration libraries, making the agent an application "expert".
+3. **Versatile Skill Set** - The `AppAgent` is equipped with a diverse set of skills to support comprehensive automation, such as mouse, keyboard, native APIs, and "Copilot".
+
+We show the framework of the `AppAgent` in the following diagram:
+
+
+
+
+
+## AppAgent Input
+
+To interact with the application, the `AppAgent` receives the following inputs:
+
+| Input | Description | Type |
+| --- | --- | --- |
+| User Request | The user's request in natural language. | String |
+| Sub-Task | The sub-task description to be executed by the `AppAgent`, assigned by the `HostAgent`. | String |
+| Current Application | The name of the application to be interacted with. | String |
+| Control Information | Index, name and control type of available controls in the application. | List of Dictionaries |
+| Application Screenshots | Screenshots of the application to provide context to the `AppAgent`. | Image |
+| Previous Sub-Tasks | The previous sub-tasks and their completion status. | List of Strings |
+| Previous Plan | The previous plan for the following steps. | List of Strings |
+| HostAgent Message | The message from the `HostAgent` for the completion of the sub-task. | String |
+| Retrieved Information | The retrieved information from external knowledge bases or demonstration libraries. | String |
+| Blackboard | The shared memory space for storing and sharing information among the agents. | Dictionary |
+
+By processing these inputs, the `AppAgent` determines the necessary actions to fulfill the user's request within the application.
+
+## AppAgent Output
+
+With the inputs provided, the `AppAgent` generates the following outputs:
+
+| Output | Description | Type |
+| --- | --- | --- |
+| Observation | The observation of the current application screenshots. | String |
+| Thought | The logical reasoning process of the `AppAgent`. | String |
+| ControlLabel | The index of the selected control to interact with. | String |
+| ControlText | The name of the selected control to interact with. | String |
+| Function | The function to be executed on the selected control. | String |
+| Args | The arguments required for the function execution. | List of Strings |
+| Status | The status of the agent, mapped to the `AgentState`. | String |
+| Plan | The plan for the following steps after the current action. | List of Strings |
+| Comment | Additional comments or information provided to the user. | String |
+| SaveScreenshot | The flag to save the screenshot of the application to the `blackboard` for future reference. | Boolean |
+
+Below is an example of the `AppAgent` output:
+
+```json
+{
+ "Observation": "Application screenshot",
+ "Thought": "Logical reasoning process",
+ "ControlLabel": "Control index",
+ "ControlText": "Control name",
+ "Function": "Function name",
+ "Args": ["arg1", "arg2"],
+ "Status": "AgentState",
+ "Plan": ["Step 1", "Step 2"],
+ "Comment": "Additional comments",
+ "SaveScreenshot": true
+}
+```
+
+!!! info
+ The `AppAgent` output is formatted as a JSON object by LLMs and can be parsed by the `json.loads` method in Python.
+
+
+## AppAgent State
+The `AppAgent` state is managed by a state machine that determines the next action to be executed based on the current state, as defined in the `ufo/agents/states/app_agent_states.py` module. The states include:
+
+| State | Description |
+| --- | --- |
+| `CONTINUE` | The `AppAgent` continues executing the current action. |
+| `FINISH` | The `AppAgent` has completed the current sub-task. |
+| `ERROR` | The `AppAgent` encountered an error during execution. |
+| `FAIL` | The `AppAgent` believes the current sub-task is unachievable. |
+| `PENDING` | The `AppAgent` is waiting for user input or external information to proceed. |
+| `CONFIRM` | The `AppAgent` is confirming the user's input or action. |
+| `SCREENSHOT` | The `AppAgent` believes the current screenshot is not clear in annotating the control and requests a new screenshot. |
+
+The `AppAgent` progresses through these states to execute the necessary actions within the application and fulfill the sub-task assigned by the `HostAgent`.
+
+
+## Knowledge Enhancement
+The `AppAgent` is enhanced by Retrieval Augmented Generation (RAG) from heterogeneous sources, including external knowledge bases and demonstration libraries. The `AppAgent` leverages this knowledge to enhance its comprehension of the application and learn from demonstrations to improve its performance.
+
+### Learning from Help Documents
+User can provide help documents to the `AppAgent` to enhance its comprehension of the application and improve its performance in the `config.yaml` file.
+
+!!! tip
+ Please find details configuration in the [documentation](../configurations/user_configuration.md).
+!!! tip
+ You may also refer to the [here]() for how to provide help documents to the `AppAgent`.
+
+
+In the `AppAgent`, it calls the `build_offline_docs_retriever` to build a help document retriever, and uses the `retrived_documents_prompt_helper` to construct the prompt for the `AppAgent`.
+
+
+
+### Learning from Bing Search
+Since help documents may not cover all the information or the information may be outdated, the `AppAgent` can also leverage Bing search to retrieve the latest information. You can activate Bing search and configure the search engine in the `config.yaml` file.
+
+!!! tip
+ Please find details configuration in the [documentation](../configurations/user_configuration.md).
+!!! tip
+ You may also refer to the [here]() for the implementation of Bing search in the `AppAgent`.
+
+In the `AppAgent`, it calls the `build_online_search_retriever` to build a Bing search retriever, and uses the `retrived_documents_prompt_helper` to construct the prompt for the `AppAgent`.
+
+
+### Learning from Self-Demonstrations
+You may save successful action trajectories in the `AppAgent` to learn from self-demonstrations and improve its performance. After the completion of a `session`, the `AppAgent` will ask the user whether to save the action trajectories for future reference. You may configure the use of self-demonstrations in the `config.yaml` file.
+
+!!! tip
+ You can find details of the configuration in the [documentation](../configurations/user_configuration.md).
+
+!!! tip
+ You may also refer to the [here]() for the implementation of self-demonstrations in the `AppAgent`.
+
+In the `AppAgent`, it calls the `build_experience_retriever` to build a self-demonstration retriever, and uses the `rag_experience_retrieve` to retrieve the demonstration for the `AppAgent`.
+
+### Learning from Human Demonstrations
+In addition to self-demonstrations, you can also provide human demonstrations to the `AppAgent` to enhance its performance by using the [Step Recorder](https://support.microsoft.com/en-us/windows/record-steps-to-reproduce-a-problem-46582a9b-620f-2e36-00c9-04e25d784e47) tool built in the Windows OS. The `AppAgent` will learn from the human demonstrations to improve its performance and achieve better personalization. The use of human demonstrations can be configured in the `config.yaml` file.
+
+!!! tip
+ You can find details of the configuration in the [documentation](../configurations/user_configuration.md).
+!!! tip
+ You may also refer to the [here]() for the implementation of human demonstrations in the `AppAgent`.
+
+In the `AppAgent`, it calls the `build_human_demonstration_retriever` to build a human demonstration retriever, and uses the `rag_experience_retrieve` to retrieve the demonstration for the `AppAgent`.
+
+
+## Skill Set for Automation
+The `AppAgent` is equipped with a versatile skill set to support comprehensive automation within the application by calling the `create_puppteer_interface` method. The skills include:
+
+| Skill | Description |
+| --- | --- |
+| UI Automation | Mimicking user interactions with the application UI controls using the `UI Automation` and `Win32` API. |
+| Native API | Accessing the application's native API to execute specific functions and actions. |
+| In-App Agent | Leveraging the in-app agent to interact with the application's internal functions and features. |
+
+By utilizing these skills, the `AppAgent` can efficiently interact with the application and fulfill the user's request. You can find more details in the [Automator](../automator/overview.md) documentation and the code in the `ufo/automator` module.
+
+
+# Reference
+
+:::agents.agent.app_agent.AppAgent
\ No newline at end of file
diff --git a/documents/docs/agents/design/blackboard.md b/documents/docs/agents/design/blackboard.md
new file mode 100644
index 00000000..dd40719f
--- /dev/null
+++ b/documents/docs/agents/design/blackboard.md
@@ -0,0 +1,55 @@
+# Agent Blackboard
+
+The `Blackboard` is a shared memory space that is visible to all agents in the UFO framework. It stores information required for agents to interact with the user and applications at every step. The `Blackboard` is a key component of the UFO framework, enabling agents to share information and collaborate to fulfill user requests. The `Blackboard` is implemented as a class in the `ufo/agents/memory/blackboard.py` file.
+
+## Components
+
+The `Blackboard` consists of the following data components:
+
+| Component | Description |
+| --- | --- |
+| `questions` | A list of questions that UFO asks the user, along with their corresponding answers. |
+| `requests` | A list of historical user requests received in previous `Round`. |
+| `trajectories` | A list of step-wise trajectories that record the agent's actions and decisions at each step. |
+| `screenshots` | A list of screenshots taken by the agent when it believes the current state is important for future reference. |
+
+!!! tip
+ The keys stored in the `trajectories` are configured as `HISTORY_KEYS` in the `config_dev.yaml` file. You can customize the keys based on your requirements and the agent's logic.
+
+!!! tip
+ Whether to save the screenshots is determined by the `AppAgent`. You can enable or disable screenshot capture by setting the `SCREENSHOT_TO_MEMORY` flag in the `config_dev.yaml` file.
+
+## Blackboard to Prompt
+
+Data in the `Blackboard` is based on the `MemoryItem` class. It has a method `blackboard_to_prompt` that converts the information stored in the `Blackboard` to a string prompt. Agents call this method to construct the prompt for the LLM's inference. The `blackboard_to_prompt` method is defined as follows:
+
+```python
+def blackboard_to_prompt(self) -> List[str]:
+ """
+ Convert the blackboard to a prompt.
+ :return: The prompt.
+ """
+ prefix = [
+ {
+ "type": "text",
+ "text": "[Blackboard:]",
+ }
+ ]
+
+ blackboard_prompt = (
+ prefix
+ + self.texts_to_prompt(self.questions, "[Questions & Answers:]")
+ + self.texts_to_prompt(self.requests, "[Request History:]")
+ + self.texts_to_prompt(self.trajectories, "[Step Trajectories:]")
+ + self.screenshots_to_prompt()
+ )
+
+ return blackboard_prompt
+```
+
+## Reference
+
+:::agents.memory.blackboard.Blackboard
+
+!!!note
+ You can customize the class to tailor the `Blackboard` to your requirements.
\ No newline at end of file
diff --git a/documents/docs/agents/design/memory.md b/documents/docs/agents/design/memory.md
new file mode 100644
index 00000000..484412aa
--- /dev/null
+++ b/documents/docs/agents/design/memory.md
@@ -0,0 +1,24 @@
+# Agent Memory
+
+The `Memory` manages the memory of the agent and stores the information required for the agent to interact with the user and applications at every step. Parts of elements in the `Memory` will be visible to the agent for decision-making.
+
+
+## MemoryItem
+A `MemoryItem` is a `dataclass` that represents a single step in the agent's memory. The fields of a `MemoryItem` are flexible and can be customized based on the requirements of the agent. The `MemoryItem` class is defined as follows:
+
+::: agents.memory.memory.MemoryItem
+
+!!!info
+ At each step, an instance of `MemoryItem` is created and stored in the `Memory` to record the information of the agent's interaction with the user and applications.
+
+
+## Memory
+The `Memory` class is responsible for managing the memory of the agent. It stores a list of `MemoryItem` instances that represent the agent's memory at each step. The `Memory` class is defined as follows:
+
+::: agents.memory.memory.Memory
+
+!!!info
+ Each agent has its own `Memory` instance to store their information.
+
+!!!info
+ Not all information in the `Memory` are provided to the agent for decision-making. The agent can access parts of the memory based on the requirements of the agent's logic.
\ No newline at end of file
diff --git a/documents/docs/agents/design/processor.md b/documents/docs/agents/design/processor.md
new file mode 100644
index 00000000..c91020c5
--- /dev/null
+++ b/documents/docs/agents/design/processor.md
@@ -0,0 +1,29 @@
+# Agents Processor
+
+The `Processor` is a key component of the agent, implementing the core logic to process the user's request. The `Processor` is implemented as a class in the `ufo/agents/processors` folder. Each agent has its own `Processor` class within the folder.
+
+## Core Process
+Once called, an agent follows a series of steps to process the user's request defined in the `Processor` class by calling the `process` method. The workflow of the `process` is as follows:
+
+| Step | Description | Function |
+| --- | --- | --- |
+| 1 | Print the step information. | `print_step_info` |
+| 2 | Capture the screenshot of the application. | `capture_screenshot` |
+| 3 | Get the control information of the application. | `get_control_info` |
+| 4 | Get the prompt message for the LLM. | `get_prompt_message` |
+| 5 | Generate the response from the LLM. | `get_response` |
+| 6 | Update the cost of the step. | `update_cost` |
+| 7 | Parse the response from the LLM. | `parse_response` |
+| 8 | Execute the action based on the response. | `execute_action` |
+| 9 | Update the memory and blackboard. | `update_memory` |
+| 10 | Update the status of the agent. | `update_status` |
+| 11 | Update the step information. | `update_step` |
+
+At each step, the `Processor` processes the user's request by invoking the corresponding method sequentially to execute the necessary actions.
+
+
+The process may be paused. It can be resumed, based on the agent's logic and the user's request using the `resume` method.
+
+## Reference
+Below is the basic structure of the `Processor` class:
+:::agents.processors.basic.BaseProcessor
\ No newline at end of file
diff --git a/documents/docs/agents/design/prompter.md b/documents/docs/agents/design/prompter.md
new file mode 100644
index 00000000..aefd4a7b
--- /dev/null
+++ b/documents/docs/agents/design/prompter.md
@@ -0,0 +1,47 @@
+# Agent Prompter
+
+The `Prompter` is a key component of the UFO framework, responsible for constructing prompts for the LLM to generate responses. The `Prompter` is implemented in the `ufo/prompts` folder. Each agent has its own `Prompter` class that defines the structure of the prompt and the information to be fed to the LLM.
+
+## Components
+
+A prompt fed to the LLM is usually a list of dictionaries, where each dictionary contains the following keys:
+
+| Key | Description |
+| --- | --- |
+| `role` | The role of the text in the prompt, can be `system`, `user`, or `assistant`. |
+| `content` | The content of the text for the specific role. |
+
+!!!tip
+ You may find the [official documentation](https://help.openai.com/en/articles/7042661-moving-from-completions-to-chat-completions-in-the-openai-api) helpful for constructing the prompt.
+
+In the `__init__` method of the `Prompter` class, you can define the template of the prompt for each component, and the final prompt message is constructed by combining the templates of each component using the `prompt_construction` method.
+
+### System Prompt
+The system prompt uses the template configured in the `config_dev.yaml` file for each agent. It usually contains the instructions for the agent's role, action, tips, response format, etc.
+You need to use the `system_prompt_construction` method to construct the system prompt.
+
+Prompts on the API instructions, and demonstration examples are also included in the system prompt, which are constructed by the `api_prompt_helper` and `examples_prompt_helper` methods respectively. Below is the sub-components of the system prompt:
+
+| Component | Description | Method |
+| --- | --- | --- |
+| `apis` | The API instructions for the agent. | `api_prompt_helper` |
+| `examples` | The demonstration examples for the agent. | `examples_prompt_helper` |
+
+### User Prompt
+The user prompt is constructed based on the information from the agent's observation, external knowledge, and `Blackboard`. You can use the `user_prompt_construction` method to construct the user prompt. Below is the sub-components of the user prompt:
+
+| Component | Description | Method |
+| --- | --- | --- |
+| `observation` | The observation of the agent. | `user_content_construction` |
+| `retrieved_docs` | The knowledge retrieved from the external knowledge base. | `retrived_documents_prompt_helper` |
+| `blackboard` | The information stored in the `Blackboard`. | `blackboard_to_prompt` |
+
+
+# Reference
+You can find the implementation of the `Prompter` in the `ufo/prompts` folder. Below is the basic structure of the `Prompter` class:
+
+:::prompter.basic.BasicPrompter
+
+
+!!!tip
+ You can customize the `Prompter` class to tailor the prompt to your requirements.
\ No newline at end of file
diff --git a/documents/docs/agents/design/state.md b/documents/docs/agents/design/state.md
new file mode 100644
index 00000000..4074e2e7
--- /dev/null
+++ b/documents/docs/agents/design/state.md
@@ -0,0 +1,120 @@
+# Agent State
+
+The `State` class is a fundamental component of the UFO agent framework. It represents the current state of the agent and determines the next action and agent to handle the request. Each agent has a specific set of states that define the agent's behavior and workflow.
+
+
+
+## AgentStatus
+The set of states for an agent is defined in the `AgentStatus` class:
+
+```python
+class AgentStatus(Enum):
+ """
+ The status class for the agent.
+ """
+
+ ERROR = "ERROR"
+ FINISH = "FINISH"
+ CONTINUE = "CONTINUE"
+ FAIL = "FAIL"
+ PENDING = "PENDING"
+ CONFIRM = "CONFIRM"
+ SCREENSHOT = "SCREENSHOT"
+```
+
+Each agent implements its own set of `AgentStatus` to define the states of the agent.
+
+
+## AgentStateManager
+
+The class `AgentStateManager` manages the state mapping from a string to the corresponding state class. Each state class is registered with the `AgentStateManager` using the `register` decorator to associate the state class with a specific agent, e.g.,
+
+```python
+@AgentStateManager.register
+class SomeAgentState(AgentState):
+ """
+ The state class for the some agent.
+ """
+```
+
+!!! tip
+ You can find examples on how to register the state class for the `AppAgent` in the `ufo/agents/states/app_agent_state.py` file.
+
+Below is the basic structure of the `AgentStateManager` class:
+```python
+class AgentStateManager(ABC, metaclass=SingletonABCMeta):
+ """
+ A abstract class to manage the states of the agent.
+ """
+
+ _state_mapping: Dict[str, Type[AgentState]] = {}
+
+ def __init__(self):
+ """
+ Initialize the state manager.
+ """
+
+ self._state_instance_mapping: Dict[str, AgentState] = {}
+
+ def get_state(self, status: str) -> AgentState:
+ """
+ Get the state for the status.
+ :param status: The status string.
+ :return: The state object.
+ """
+
+ # Lazy load the state class
+ if status not in self._state_instance_mapping:
+ state_class = self._state_mapping.get(status)
+ if state_class:
+ self._state_instance_mapping[status] = state_class()
+ else:
+ self._state_instance_mapping[status] = self.none_state
+
+ state = self._state_instance_mapping.get(status, self.none_state)
+
+ return state
+
+ def add_state(self, status: str, state: AgentState) -> None:
+ """
+ Add a new state to the state mapping.
+ :param status: The status string.
+ :param state: The state object.
+ """
+ self.state_map[status] = state
+
+ @property
+ def state_map(self) -> Dict[str, AgentState]:
+ """
+ The state mapping of status to state.
+ :return: The state mapping.
+ """
+ return self._state_instance_mapping
+
+ @classmethod
+ def register(cls, state_class: Type[AgentState]) -> Type[AgentState]:
+ """
+ Decorator to register the state class to the state manager.
+ :param state_class: The state class to be registered.
+ :return: The state class.
+ """
+ cls._state_mapping[state_class.name()] = state_class
+ return state_class
+
+ @property
+ @abstractmethod
+ def none_state(self) -> AgentState:
+ """
+ The none state of the state manager.
+ """
+ pass
+```
+
+## AgentState
+Each state class inherits from the `AgentState` class and must implement the method of `handle` to process the action in the state. In addition, the `next_state` and `next_agent` methods are used to determine the next state and agent to handle the transition. Please find below the reference for the `State` class in UFO.
+
+::: agents.states.basic.AgentState
+
+
+!!!tip
+ A `Round` calls the `handle`, `next_state`, and `next_agent` methods of the current state to process the user request and determine the next state and agent to handle the request, and orchestrates the agents to execute the necessary actions.
diff --git a/documents/docs/agents/evaluation_agent.md b/documents/docs/agents/evaluation_agent.md
new file mode 100644
index 00000000..13413420
--- /dev/null
+++ b/documents/docs/agents/evaluation_agent.md
@@ -0,0 +1,35 @@
+# EvaluationAgent ๐ง
+
+The objective of the `EvaluationAgent` is to evaluate whether a `Session` or `Round` has been successfully completed. The `EvaluationAgent` assesses the performance of the `HostAgent` and `AppAgent` in fulfilling the request. You can configure whether to enable the `EvaluationAgent` in the `config_dev.yaml` file and the detailed documentation can be found [here](../configurations/developer_configuration.md).
+!!! note
+ The `EvaluationAgent` is fully LLM-driven and conducts evaluations based on the action trajectories and screenshots. It may not by 100% accurate since LLM may make mistakes.
+
+
+## Evaluation Inputs
+The `EvaluationAgent` takes the following inputs for evaluation:
+
+| Input | Description | Type |
+| --- | --- | --- |
+| User Request | The user's request to be evaluated. | String |
+| APIs Description | The description of the APIs used in the execution. | List of Strings |
+| Action Trajectories | The action trajectories executed by the `HostAgent` and `AppAgent`. | List of Strings |
+| Screenshots | The screenshots captured during the execution. | List of Images |
+
+!!! tip
+ You can configure whether to use all screenshots or only the first and last screenshot for evaluation in the `config_dev.yaml` file.
+
+## Evaluation Outputs
+The `EvaluationAgent` generates the following outputs after evaluation:
+
+| Output | Description | Type |
+| --- | --- | --- |
+| Reason | The detailed reason for the judgment, based on observing the screenshot differences and the action trajectories. | String |
+| Sub-score | The sub-score of the evaluation in decomposing the evaluation into multiple sub-goals. | List of Dictionaries |
+| Complete | The completion status of the evaluation, can be `yes`, `no`, or `unsure`. | String |
+
+The `EvaluationAgent` employs the CoT mechanism to first decompose the evaluation into multiple sub-goals and then evaluate each sub-goal separately. The sub-scores are then aggregated to determine the overall completion status of the evaluation.
+
+# Reference
+
+:::agents.agent.evaluation_agent.EvaluationAgent
+
diff --git a/documents/docs/agents/follower_agent.md b/documents/docs/agents/follower_agent.md
new file mode 100644
index 00000000..4855366f
--- /dev/null
+++ b/documents/docs/agents/follower_agent.md
@@ -0,0 +1,28 @@
+# Follower Agent 🚶🏽‍♀️
+
+The `FollowerAgent` is inherited from the `AppAgent` and is responsible for following the user's instructions to perform specific tasks within the application. The `FollowerAgent` is designed to execute a series of actions based on the user's guidance. It is particularly useful for software testing, when clear instructions are provided to validate the application's behavior.
+
+
+## Different from the AppAgent
+The `FollowerAgent` shares most of the functionalities with the `AppAgent`, but it is designed to follow the step-by-step instructions provided by the user, instead of doing its own reasoning to determine the next action.
+
+
+## Usage
+The `FollowerAgent` is available in `follower` mode. You can find more details in the [documentation](). It also uses a different `Session` and `Processor` to handle the user's instructions. The step-wise instructions are provided by the user in a JSON file, which is then parsed by the `FollowerAgent` to execute the actions. An example of the JSON file is shown below:
+
+```json
+{
+ "task": "Type in a bold text of 'Test For Fun'",
+ "steps":
+ [
+ "1.type in 'Test For Fun'",
+ "2.select the text of 'Test For Fun'",
+ "3.click on the bold"
+ ],
+ "object": "draft.docx"
+}
+```
+
+# Reference
+
+:::agents.agent.follower_agent.FollowerAgent
\ No newline at end of file
diff --git a/documents/docs/agents/host_agent.md b/documents/docs/agents/host_agent.md
new file mode 100644
index 00000000..dacb86d7
--- /dev/null
+++ b/documents/docs/agents/host_agent.md
@@ -0,0 +1,140 @@
+# HostAgent 🤖
+
+The `HostAgent` assumes four primary responsibilities:
+
+1. **User Engagement**: The `HostAgent` engages with the user to understand their request and analyze their intent. It also converses with the user to gather additional information when necessary.
+2. **AppAgent Management**: The `HostAgent` manages the creation and registration of `AppAgents` to fulfill the user's request. It also orchestrates the interaction between the `AppAgents` and the application.
+3. **Task Management**: The `HostAgent` analyzes the user's request, to decompose it into sub-tasks and distribute them among the `AppAgents`. It also manages the scheduling, orchestration, coordination, and monitoring of the `AppAgents` to ensure the successful completion of the user's request.
+4. **Communication**: The `HostAgent` communicates with the `AppAgents` to exchange information. It also manages the `Blackboard` to store and share information among the agents, as shown below:
+
+
+
+
+
+
+The `HostAgent` activates its `Processor` to process the user's request and decompose it into sub-tasks. Each sub-task is then assigned to an `AppAgent` for execution. The `HostAgent` monitors the progress of the `AppAgents` and ensures the successful completion of the user's request.
+
+## HostAgent Input
+
+The `HostAgent` receives the following inputs:
+
+| Input | Description | Type |
+| --- | --- | --- |
+| User Request | The user's request in natural language. | String |
+| Application Information | Information about the existing active applications. | List of Strings |
+| Desktop Screenshots | Screenshots of the desktop to provide context to the `HostAgent`. | Image |
+| Previous Sub-Tasks | The previous sub-tasks and their completion status. | List of Strings |
+| Previous Plan | The previous plan for the following sub-tasks. | List of Strings |
+| Blackboard | The shared memory space for storing and sharing information among the agents. | Dictionary |
+
+By processing these inputs, the `HostAgent` determines the appropriate application to fulfill the user's request and orchestrates the `AppAgents` to execute the necessary actions.
+
+## HostAgent Output
+
+With the inputs provided, the `HostAgent` generates the following outputs:
+
+| Output | Description | Type |
+| --- | --- | --- |
+| Observation | The observation of current desktop screenshots. | String |
+| Thought | The logical reasoning process of the `HostAgent`. | String |
+| Current Sub-Task | The current sub-task to be executed by the `AppAgent`. | String |
+| Message | The message to be sent to the `AppAgent` for the completion of the sub-task. | String |
+| ControlLabel | The index of the selected application to execute the sub-task. | String |
+| ControlText | The name of the selected application to execute the sub-task. | String |
+| Plan | The plan for the following sub-tasks after the current sub-task. | List of Strings |
+| Status | The status of the agent, mapped to the `AgentState`. | String |
+| Comment | Additional comments or information provided to the user. | String |
+| Questions | The questions to be asked to the user for additional information. | List of Strings |
+| AppsToOpen | The application to be opened to execute the sub-task if it is not already open. | Dictionary |
+
+
+Below is an example of the `HostAgent` output:
+
+```json
+{
+ "Observation": "Desktop screenshot",
+ "Thought": "Logical reasoning process",
+ "Current Sub-Task": "Sub-task description",
+ "Message": "Message to AppAgent",
+ "ControlLabel": "Application index",
+ "ControlText": "Application name",
+ "Plan": ["Sub-task 1", "Sub-task 2"],
+ "Status": "AgentState",
+ "Comment": "Additional comments",
+ "Questions": ["Question 1", "Question 2"],
+ "AppsToOpen": {"APP": "powerpnt", "file_path": ""}
+}
+```
+
+!!! info
+ The `HostAgent` output is formatted as a JSON object by LLMs and can be parsed by the `json.loads` method in Python.
+
+
+## HostAgent State
+
+The `HostAgent` progresses through different states, as defined in the `ufo/agents/states/host_agent_states.py` module. The states include:
+
+| State | Description |
+| --- | --- |
+| `CONTINUE` | The `HostAgent` is ready to process the user's request and employ the `Processor` to decompose it into sub-tasks and assign them to the `AppAgents`. |
+| `FINISH` | The overall task is completed, and the `HostAgent` is ready to return the results to the user. |
+| `ERROR` | An error occurred during the processing of the user's request, and the `HostAgent` is unable to proceed. |
+| `FAIL` | The `HostAgent` believes the task is unachievable and cannot proceed further. |
+| `PENDING` | The `HostAgent` is waiting for additional information from the user to proceed. |
+| `CONFIRM` | The `HostAgent` is confirming the user's request before proceeding. |
+
+The `HostAgent` transitions between these states based on the user's request, the application information, and the progress of the `AppAgents` in executing the sub-tasks.
+
+## Creating and Registering AppAgents
+When the `HostAgent` determines the need for a new `AppAgent` to fulfill a sub-task, it creates an instance of the `AppAgent` and registers it with the `HostAgent`, by calling the `create_subagent` method:
+
+```python
+def create_subagent(
+ self,
+ agent_type: str,
+ agent_name: str,
+ process_name: str,
+ app_root_name: str,
+ is_visual: bool,
+ main_prompt: str,
+ example_prompt: str,
+ api_prompt: str,
+ *args,
+ **kwargs,
+ ) -> BasicAgent:
+ """
+ Create an SubAgent hosted by the HostAgent.
+ :param agent_type: The type of the agent to create.
+ :param agent_name: The name of the SubAgent.
+ :param process_name: The process name of the app.
+ :param app_root_name: The root name of the app.
+ :param is_visual: The flag indicating whether the agent is visual or not.
+ :param main_prompt: The main prompt file path.
+ :param example_prompt: The example prompt file path.
+ :param api_prompt: The API prompt file path.
+ :return: The created SubAgent.
+ """
+ app_agent = self.agent_factory.create_agent(
+ agent_type,
+ agent_name,
+ process_name,
+ app_root_name,
+ is_visual,
+ main_prompt,
+ example_prompt,
+ api_prompt,
+ *args,
+ **kwargs,
+ )
+ self.appagent_dict[agent_name] = app_agent
+ app_agent.host = self
+ self._active_appagent = app_agent
+
+ return app_agent
+```
+
+The `HostAgent` then assigns the sub-task to the `AppAgent` for execution and monitors its progress.
+
+# Reference
+
+:::agents.agent.host_agent.HostAgent
diff --git a/documents/docs/agents/overview.md b/documents/docs/agents/overview.md
index e69de29b..d6017e1a 100644
--- a/documents/docs/agents/overview.md
+++ b/documents/docs/agents/overview.md
@@ -0,0 +1,37 @@
+# Agents
+
+In UFO, there are four types of agents: `HostAgent`, `AppAgent`, `FollowerAgent`, and `EvaluationAgent`. Each agent has a specific role in the UFO system and is responsible for different aspects of the user interaction process:
+
+| Agent | Description |
+| --- | --- |
+| [`HostAgent`](../agents/host_agent.md) | Decomposes the user request into sub-tasks and selects the appropriate application to fulfill the request. |
+| [`AppAgent`](../agents/app_agent.md) | Executes actions on the selected application. |
+| [`FollowerAgent`](../agents/follower_agent.md) | Follows the user's instructions to complete the task. |
+| [`EvaluationAgent`](../agents/evaluation_agent.md) | Evaluates the completeness of a session or a round. |
+
+In the normal workflow, only the `HostAgent` and `AppAgent` are involved in the user interaction process. The `FollowerAgent` and `EvaluationAgent` are used for specific tasks.
+
+Please see below the orchestration of the agents in UFO:
+
+
+
+
+
+## Main Components
+
+An agent in UFO is composed of the following main components to fulfill its role in the UFO system:
+
+| Component | Description |
+| --- | --- |
+| [`State`](../agents/design/state.md) | Represents the current state of the agent and determines the next action and agent to handle the request. |
+| [`Memory`](../agents/design/memory.md) | Stores information about the user request, application state, and other relevant data. |
+| [`Blackboard`](../agents/design/blackboard.md) | Stores information shared between agents. |
+| [`Prompter`](../agents/design/prompter.md) | Generates prompts for the language model based on the user request and application state. |
+| [`Processor`](../agents/design/processor.md) | Processes the workflow of the agent, including handling user requests, executing actions, and memory management. |
+
+## Reference
+
+Below is the reference for the `Agent` class in UFO. All agents in UFO inherit from the `Agent` class and implement necessary methods to fulfill their roles in the UFO system.
+
+::: agents.agent.basic.BasicAgent
+
diff --git a/documents/docs/automator/overview.md b/documents/docs/automator/overview.md
new file mode 100644
index 00000000..e69de29b
diff --git a/documents/docs/configurations/developer_configuration.md b/documents/docs/configurations/developer_configuration.md
index d7af9097..2d983e9c 100644
--- a/documents/docs/configurations/developer_configuration.md
+++ b/documents/docs/configurations/developer_configuration.md
@@ -14,7 +14,7 @@ The following parameters are included in the system configuration of the UFO age
| `RECTANGLE_TIME` | The time in seconds for the rectangle display around the selected control. | Integer | 1 |
| `SAFE_GUARD` | Whether to use the safe guard to ask for user confirmation before performing sensitive operations. | Boolean | True |
| `CONTROL_LIST` | The list of widgets allowed to be selected. | List | ["Button", "Edit", "TabItem", "Document", "ListItem", "MenuItem", "ScrollBar", "TreeItem", "Hyperlink", "ComboBox", "RadioButton", "DataItem"] |
-| `HISTORY_KEYS` | The keys of the step history added to the blackboard for agent decision-making. | List | ["Step", "Thought", "ControlText", "Subtask", "Action", "Comment", "Results", "UserConfirm"] |
+| `HISTORY_KEYS` | The keys of the step history added to the [`Blackboard`](../agents/design/blackboard.md) for agent decision-making. | List | ["Step", "Thought", "ControlText", "Subtask", "Action", "Comment", "Results", "UserConfirm"] |
| `ANNOTATION_COLORS` | The colors assigned to different control types for annotation. | Dictionary | {"Button": "#FFF68F", "Edit": "#A5F0B5", "TabItem": "#A5E7F0", "Document": "#FFD18A", "ListItem": "#D9C3FE", "MenuItem": "#E7FEC3", "ScrollBar": "#FEC3F8", "TreeItem": "#D6D6D6", "Hyperlink": "#91FFEB", "ComboBox": "#D8B6D4"} |
| `PRINT_LOG` | Whether to print the log in the console. | Boolean | False |
| `CONCAT_SCREENSHOT` | Whether to concatenate the screenshots into a single image for the LLM input. | Boolean | False |
@@ -24,7 +24,7 @@ The following parameters are included in the system configuration of the UFO age
| `USE_APIS` | Whether to allow the use of application APIs. | Boolean | True |
| `ALLOW_OPENAPP` | Whether to allow the open app action in `HostAgent`. | Boolean | False |
| `LOG_XML` | Whether to log the XML file at every step. | Boolean | False |
-| `SCREENSHOT_TO_MEMORY` | Whether to allow the screenshot to memory for the agent's decision making. | Boolean | True |
+| `SCREENSHOT_TO_MEMORY` | Whether to allow the screenshot to [`Blackboard`](../agents/design/blackboard.md) for the agent's decision making. | Boolean | True |
## Main Prompt Configuration
diff --git a/documents/docs/img/appagent.png b/documents/docs/img/appagent.png
new file mode 100644
index 0000000000000000000000000000000000000000..83b078e4a47f7c36c29a61a26b23f06013b37858
GIT binary patch
literal 192024
zcmeFZc|6o>|3B{BopL%^PN&6AJ6TKCu}mEy2|3oWCMMbU7;EK3B_``cc8M~`*cnu+
z!NkZivW>(T42Bs)4C8x^bME{5d3=xV`}6z#eb3|3BWBF|dR?#O`C6~*!Bt~@-k*hk
z=HlYwHMn%~cP_5I6fQ1qub=jScP5o@rGo!{2>4z9H?FcS(HZc|kFFPtE^u*GAop!J
z?FPT^^}PfO;Nntz&H49(l#9(U7uQyy!Nm*aAr5n!u1`dnsf$ri+Ln)ZSV1stG_2{(
z4<|73Mrn_gj^90N>h{dE>yD?0h_mQV4kEs<_lR(Niu&4#9N^t|YVV1IriVNJ@y9O)
zu;ebYqN1ON-oJS1cyD1QgB@8;4dBgqQP!tLB4q?vY;E2D{%dw<%PmU^5XyPoo|ra?yd-Z4d91H9M!?$d!@yfc2$m`&jZ!|G>qy%>Q=;I4?A_
zXm7{!3zJu7fO5=T@QU+i3_HIk&)Ne47D(9vOs?vg26vWpg^If4AC6=FA3hw{1hli?
zw76Wk_}5DpZvO{ZPwF^IaQY=wCWg^*S~`XSp101u1HqnOW9nW8i2bbNhqlX!twT!J
zq~SE)wj6+*M+N7eon1)TaQGhagLUE0ynYF`D^Vy>YPG8tRP@+miHMzE#>Pes2z~^}
zHx1n|{P0h*%*~0qf`C>`#lr@waO*`T{C&XHFw4wfK-s$;gwt`6JK^ojXw$N*>AitF
zLlxIAp;ypqH1OB=caT)d17>2)n_rTY>xz~tikZ&uA6w7E{78WGUvN9{=0M38*YGN@
zvC6L-1KEctV&JKMJJH(hRN(tYe`rtsONTwx|LLJ|b;v4S>s{eCj^S0M3vt!ZP|@|x
zP+GFLNv%kbcq+3_3H{L8Xa8O2s)Uwl%83O#kM+%KPl*$~7-{#>>fdPrDVBZBOtLm0
z`e6GGMDHtERD>)#Tf94C6fz2V<}6n<>~A!X=z2s}Q`4o?u*H>ENH>*Y2P=f-ROcYN
zke3#Q?LESSAd+1NB+vyYymTic^CH2G
zI5DtXAx|(Q&6}gDv^Eim@3mA-hyB$h)AGBLsf=H5Y_csSqKE+{EVCxS`P-Pqznf;q
zYiajaZNhIqVfYU1Ml<(NKSDm6*{5ypv(l1E3I0VI-<@dD$8f1wQO8FfTnhj3y_SNm
zx1^MJm}&VunzJ76aBY<7Y(%y!v3_3yl*Wc5;{O4RPUA3BY-lz@m2`X^K=vGOU}nUM-^TXw7ZsEV3>TwLA7
zJLsF9EANLByl*eU<)|IP3lA=@5`;y2XzzK~pNTCRBrmj7Cp??De2xo#sM_8zjRKLS
zuEh)YJPdo3Do^w8`q~vXqTZWx4>l8Njkl~MO5BG?xH<5fQh(}+xNn=o)?8TYL*rn9
z_T%|Pv1&&m2@P8xD;xglV%Dk>4vnT`i57j$h3X%UW%D1Z
z6(0y7(^*uys9u(U98H~@i>p|22Z*Fj+niBok)F^cv-r$;cHhq
z)elxEWR2-E*iSj$$;f|6&>dNc-ZW}tyA#iP5HY4`I^RRxCUHBY^6ZjL8sktUl@T-*rwnUHS0@gzJC>@xsezTA<%r{jk#vhP$Cqsgu4o=hKVc%8UmdTb&1S+xXR
z&N!bvj>VQ2NSU@o%aac+gUn{pl%d*;^Zc71Vx`WvZJOKHVp5~zU-a*
zyij*BoyUFmL(#SeHmGV(VN1lG9y%X4S>4srZAFqAAYM&C5OkZY3kQQsu(&cptp#!5
z&JDSzX-wfyNeQa6%Gc$n$7VND$jU$zU;gfPm846mq7`)cx({lN9Jt?Mqw-Oj=6kiz
zuK_(E-N`opqQ3Uq~=EOfK#awQZN%b;hrhJIoQznAVrRKDz|o^SDylaTrv^7UdVbS5Z&xE2GR
zZ_(!mfTS;D2bSMQJC&ey6ijv04RrLJss$HJO{3xREZ5XvH7V+1G6pO{toI%C?o+Qy
zE9KUTH7yqy;0V{Ii7&|kWs1Z%Li^E44w9yX*lM&)$uZ;s0LW=ODUaFmFGi-Vzilr;
zH4?u2yQMAED#&|C1-<#7FZ
z6o*hO$ARWL>_{k~hli*CdYwL5=mtjmx>_3uSe{lBbW{HIEUR}^#cTifS^s9!zbc!A4W*q5Xn6fw^
z{jzxCM}T2Fd6mRv-Lh7tZ3)5fF)*?i-$RHoifL0gn1gOb5GRy82J2G5*=%@LLE^~KLQFLen3W}hpaHP3X$O}df6Vc-$k3ZRduI@&~
zEc@CzO2jf@=grmM`{z^O9x?rWUi3dU966s-
z9e%{Ut_Y%a7&Ts)zXxUKua4B^eE=&&V1pd&V$03#%5>mFhNql>MK*~3Rn!%BU)swWf8qRu@OpbR+-T_AkK%gmi+1xK$+o09FAE?JWURgIc?l8MJ
zk(h@f;XP>FMG$#hAvRC10uLuoQJvMCCggYBs}3JQ#D$5?5R$s;0a(poOku42F+-ac
zdFMmHo=yfhq1BN+EJ3S`2#((Zilamj;lu*;zsCaWzxTbqGSY3lALFUIaMpB&@DGFc
zUK$TAdTM&+%reF$;i)&Q+lM4Kv#UK1q9%Io20X3`A6c2`!Oc`q)p_=G<~!&RS9>#6
z4_Wn9eDKm)9h9E;IEL*k&R;nz4Z=u=l;Qv%j-=-qAZzTl+DXy}mG7HTe?S$#IWFh^
zP0G6t7nCdYvMO5d2bobq-9&)g{4Z&k%M3dk%2|AUJ1g8=AXxk%gg^}KVe`4DPt|!{
z6tf*!yZ`$nBM*)+dbi2G0Z(1o7O~7uB7WFmQ*+Z)mv`U<@2ZQ~`3}XK2VecX0wsL0
zPWwT2d$*8S7{R2ELC)|VAZxm45&CiP(&L?oeby9(vbu>lh&(Fbxa=q&&r!0LWyvCN@{zVP%~nvA(#4t{?Hpo2
z5=FEyLR-mRd;Ye?(7ZM7U}QLPqN`D&|1GsA$`)VWK@b12Fv9pP+KXae6eJ7F;j9_D
z17LnR<=!Ue4ig*M%TJqbVY=$_E?UEO={Gc8%XztP+ZzQKzLJ!G9^7Es;@z*L0tf3GUETPuUx7SDHph$t%7CznGjAfGq+;x?=?N&R$p=O
z*=_dQzo&M$MYjDi*RsIhMZ-06CKH57T)8bfq)A_5IQc&p*XF&Q$l{@ZRFZ32;qThT
zRp+DzVt=mlQR9CPzh3jUa>@@}>-+v5nsa#zb^){H>UP}0Y5f1+V)$$$6eUnYTh#v&
zN~K=4IrKH%zn|92j5f1o6=!SbcsQ_Kz+2|N@U8VIx@GF=N;Vtl(jhFD}n%xk?soIZeso7p~0h7Fh=CpbYp4IS%Mf#QCXmg`o
zLMs@7ADh)$i6PGTr`DF8CP89rjT567L?15!P&AYKw?^zf@1wa@9+V_Hu3iy@$@~P-
z9kMCvfS2}8K2H;(A192;og<4ATIDJy-H?1^rA%%{;1M4ayq)?N1fE4hX^Ch->T0_g
zb+sG2NX}b027@T%-$-d7&M5ZUZmp4S#e@vAifc#t!-lttx|@ICy1ib>0fh6_0yUn=
zv&&;d|MN|-oxtO^4eKiPgnCKPL%TtUQDh%PC)+dqq^-?V+tXC1#(K=KMDsB9Q{%+^
z>huxP1;SC2*rMKEa#Vj5b(KkJ$doTf_Ts3afe}8TfyF*q6m6d{)mmeeTd5g2h1gCv
z88PMOx~&c0%oE|d?LIEprBm>VG@O$9H%F|?A`lJyZKwx@h9T98k5`$*nN*D-Eux|!
z5y)gzELu$UBk9{#DAfSw496z
zoqG{HS}k{}r5rjyM&nwaQ)It{F1$UP8LQ{CBRW4^RwzxDmN*#}K;?mbhS)lIq?^?b
zXYuvU_}V-6zR@0)*sL4jwsmbMm-b3aRO3gNK`JZ(bVZ6=?BxsNSh(Qtp|y*MpRd;y
z`{`=A`t6PmC9W^>X_<`Mb=jMs-V}S5@HT|2F_R?+`^*FAKt4xtS%cTHW;+THTdo
zAy-K*O;c|QezR}Er6{<@qp42uh``9+a$=xHYUA8Y8A&(
zuAms$&8^@V4pr)ZQdPVc=iAc=qny6a=(i4(A_$8cis-!jgtpr=|8Mw!2`Sf)rL*gU
zk}^jiFnRCvQ~XsQQwD;Llq>QSMTam&-WrxPzTdJQIVWGTu?q2Np)IdOUBsKKzts@e
zDrl((w@oM7{A;mwA>Lk=Je0hTJ96Q6-0C0fO`p=_$vNU_dOt8s
z6Z{oKGrVTbJ>5(wyo9Hze~k~}))b)ML~2}{XRw<_hqoxem`A?vKb`t}eyzULLy~`z{|-(?5q^XZbpOQ_e4!@K8@l-qDp`
znuy4DwvohtCcf~u1==vIXbTDKE~Qsgqb{4^H3J085}ACnGxp2!htwHABP!tunnQ|)
zWnNkGt+qvN<8sN961H;;5RLW#+l}|Xrlyc?m|59;NcBmPjzw9tIq|VLVZQMmp_Z!7
zXO%Xt$qvSBRqkj_ZkdA+-DZ-aPFmH+G*w{Bkx{RWXulN&ckJVDPQAxBJ;SCxb|_jM
z^FiQhnt~EE`vD!|iWbH~JuK7aH;A9c41VF_s{50S=@Z$?%kYSDSslI2L}vJp^*oR6
z%{ae|UH%p!Wvmv%_>P9&j@$CgI+1fT>)4ok`cXsM8>=L@1+_EcOwf
z25W`re}CQcNi9BECwgs)+9OIUC*P;3r~aVQ)N2{|#*UX%9hQQ`N@FVK%Pq
zGI9Z-&~@e`!!dEY%9x?`IMNUA>W{Tfwc8eTKGwyVJeG(=+dt>YkkaRW>XNO%#r62(
zUu)hZez9V{O0!^2n1+FME7Nqx_ThZRh41aCXRkK0D6aro22lfHy^6Lg)ypJ^TWVLltq=3x_4tEybac`F4IoClEQok>~)BAAN;IP3x)mg-&0L9YlLgUo|e^
zueD~`Zw7drK@)KYGYN||%rT}~(VLpmc`9KRQKaq>@_PwI`{DzLiH#P)!m-8S?b{h`
zYkd7fh<0h4b6?OOC9Cy1UORi1S1qemRmO?7wyteo(+2%>;boVo?+j=k{O2umGNRHomvPdWmvL%akzex2RXIvL^
z%yxbOQr^&>OhQ$(V?%h*IF8y`{N4t507AQ-{wc$0$?D(H{OmEn8U
zo6mYgU77Wr8PyI-^3*0H*xypmtMfTFlI;ei$>a~j8B7VhYgoc*_>m6c&H}N%=d5SK
zoRFqjO_V1_R_ybsk_16Xf`qDrcEs#OL`yJzh>Enz1ZC=N
zYc;JLX_K;Dzl~4uX2+okg_d~Axmj$TxKAQIv%uGF1)8a?0)C%rh@h^vk}Jl0uzGZH
z+aQBsxnlzX4?gUi!}Nt|?E=U+LUhBF!E4$U4L9?!Q%%ke%3{VT;{tfw=o=lg1;g-9
z_CzT7eq
zlWtdQ?=hagV%Q=f-}&0K+Lzd%ycy}M9UUN?N^SmQAP-rYEeRU9ETaE~^#G4B#7tnDRy(U0-SrGs!t=n~-xtCPCCnsLK|TYjl`T
z^mxnGrN;27l)ZFX>l_2Lzer9XL?U0~9el@IYHld)eO{OQCR*63`2CP7D4(G*m^8zr
zl0EjGOEKJOj#aswlaP=(N)dUn}}?R;YkbHfML(s65j@X
zqQ8?VYnJ}4c;Mj;IiCvR__}7E=#IYhHOp4AeK&~P@`y11TwU6)swU4;HKy(bg;6G7
zKOUsdw)eVhR!qpWA_!tCHiu^(lM=n?CCC~)tjHuKir!Tel>GzOd4+$Pu3tUR>_y&A
zYdkSo`6m}Sfmdk+#kvDl*+-vacU{TAGhX1W>orwML9g|MheVYU?y0MoUp-Jb%BRpK
zmqAyQmu^#;9nzF+9~*>)MdCiy?MU=_TfF(2J@EN2;0aMU%{>g+P{}t5P5~}sUtk#K
zH-pr1?C#k3m*Mu>zUU2_sh9u6=>z1`uf9ha`-%uAYn61uIvvljSc-=t`KJv0Tw5d)sObrzwH@hk`
ztL!Z&azl$qR{ssLV~*a9UFP#CssaXUbE9n~O?}&7)mjk-kl4;dlu70>L+*scd$I9l
zJQO(wl(h0@;!i}pon}CfiWvsIquV!JIV$qlb^4w3{OijFyiL}uWcc)@%CtD=RQOK@
z5(ngGGHr0Bg<>jk!Yi1x&g1yAhua~Juxyz_K?|ku5*U+AEwpS!Sh_i)evm;8#0O#J
zkviWa*VsKv*3mm%9ysneo|b3+1DYO!ytHm)&HNI;mJGC}zbE;)SCE=do@@KBY@@u9
zrk=4lSQ6Uo{V=3?m+7u}1Iot^8+ilMa+D1ZMK?>t113OZq*d{EKuDp@q^n^&mN%4&
zRzsdw8K>&XRl29k+Rg?3+68Yr`BAvJ9g}^`n)NkQ9?f{NqV^W9#Shz)f!+)js=-W0
z#s+91LDPw(Co1EiZe?kJJ5a_`j`B5Nd3s3_7@nmjm1(7x%aOTDJL^F>yDQ)#^~Bl@
z>T-wv=MEuMdh|_RrzfC2PQ(Otj}B?%)`oRo8T8*c0rQ*uTpP<+<_mRyzkkAI^y~z=
zA^B7rwDvOr#E6~SvBOb9w?Un3>jmPqtHh%PSad+*jq{)r$7NEdNqGYp`FQnn2^nEj
zkC6@)F?Cx9mm3V4Y~C1pO|!&5R;tPW0KO!ar9rxtvT@su)n=1xSG^_Zn83Z;`g*SM>}og
zA6EOgb%DK6&Rshc%!lE#g`tDU>W2+U0a^OlW9d?`E?Au+*5PfTmLB+P-jZh#!o*g^U#g>c~xyNjWky)~gu%^alTh%oxs
zSA(fe=iHh;i(aIjdo#LZLouKra+0DCw8(Pbh%#Dn46hV@5?LTB%s|X&vM?Qs4o$Rt
zfb9%XK}FkJS&BwPaSf70mk4vkH#-tL|0`ijy#Lggh7*)5Kl>#Jnd1#D|_RWZCS+|5JkVXWfbZ
zZq_H~!{o99ln(DCzVnW(rvW$Gpb1!WHtk!<^?{F*Wv
zL~j&Ig_e>^C1s6g+PU?;n29N_
zyhpnTW{tC2R>_}S%X*MR&b)^Y*dVT<-&0T84LSFr$vv0C&@T@Y)P>`AikNd9NC6*Q
z)?)Wj+PB13g%DfI3hmtTjRt%z>niHA4I1RSrweHlEt_HL<6)~E?a=kg%#})<%c4R~
z%A1zZ%f>Ath#7?DS}>tQC|K)MfX_zZhWFIKWOq91Ax~v2Vf_)`zV%(vv3ibjVJ*wY
z5%VS{cteGj)3&-UajwH}ybJff6Ao1FX?XJlIcmrM~4JVT2pP(blJ_{D`!PC4SgH0C4)urU|(?0q
z%lEHR-`iMyBD|>P?F5}UM5JrJi@NK0$71MS)QX_8N4*j{X{G7=dXdIowXGjTnZ60%WIpM1@~oaa@n@&>!2Q6EW4`adCMA;pn*W%>|};txA@Mcj|?1EOKU(
zR)Fj9F&0#aj{V#s$;G84^3N!19K*vsbM;DHK6ic6^on~HUY*x!`BiGQ-Rn@XgX)=-
z^^D7{FAopzEqT8Lhq-P8=E?a=@VPJt_uZE=3o3SWQ|{wU0579N5ifYKQ(>t{izeSs
z#%^QMir!|ZN5h{_JF5n^eHP%$35GRmh
zP4kcIe)j$NBVxVomb%a4kBrsb04|*W2VCSfi+5GlIL6r}oVFt}ehIn)=J3$1mWM}a
zzSV**m>M(WKtfO%-CGr}z7G{7F}nsdqKaS%b3{qwE2OC*9!nW|V--6UPY%vBxf&&}
znrtPBMvP|p57{F-()ml1_T_{XDxOdohi@5s+Xh}LZw$VqpmZtbYy3!$+yN_EvdC7Q
zo%lLkVX`y;D(4TJqWBH|tp5J)!5*!fdgKB({wpn}qz$$NITbDfXW4R()jK=%`I2@)
zGFP=`>T=6t8PD4=$3vmbMB$HS)C5rlg6QBWRJP)U1ox~HQ9c_Bp#65`pApp`ctuec
zUi_q3;N^nDI;unn008jx(O{Pn&$G)l8fGu7K=uIRO|q_Nz5Tc1G==PCqVE4u+fc
zC!r@w-+Ks0;6i=OEdSqb>eZChY-J0!*N_1Do6n|kSEP#<{F^Z}yC%YrKRP60I^XM9
zveiOjP9)6