diff --git a/agentuniverse/agent_serve/web/request_task.py b/agentuniverse/agent_serve/web/request_task.py index 5f32ae8b..3cb13720 100644 --- a/agentuniverse/agent_serve/web/request_task.py +++ b/agentuniverse/agent_serve/web/request_task.py @@ -73,8 +73,13 @@ def receive_steps(self): ensure_ascii=False) + "\n\n" if self.canceled(): return - yield "data:" + json.dumps({"result": self.thread.result()}, - ensure_ascii=False) + "\n\n" + try: + result = self.thread.result() + yield "data:" + json.dumps({"result": result}, + ensure_ascii=False) + "\n\n" + except Exception as e: + LOGGER.error("request task execute failed: " + str(e)) + yield "data:" + json.dumps({"error": {"error_msg": str(e)}}, ensure_ascii=False) + "\n\n" def append_steps(self): """Tracing async service running state and update it to database.""" @@ -225,7 +230,7 @@ def finished(self): self.__request_do__.state = TaskStateEnum.FINISHED.value @staticmethod - def query_request_state(request_id: str) -> dict|None: + def query_request_state(request_id: str) -> dict | None: """Query the request data in database by given request_id. 
Args: diff --git a/agentuniverse/agent_serve/web/thread_with_result.py b/agentuniverse/agent_serve/web/thread_with_result.py index 96414c9c..4d0d030e 100644 --- a/agentuniverse/agent_serve/web/thread_with_result.py +++ b/agentuniverse/agent_serve/web/thread_with_result.py @@ -30,6 +30,9 @@ def run(self): self._return = self.target(*self.args, **self.kwargs) except Exception as e: self.error = e + finally: + if 'output_stream' in self.kwargs: + self.kwargs['output_stream'].put('{"type": "EOF"}') def result(self): """Wait for target func finished, then return the result or raise an diff --git a/docs/guidebook/en/2_2_2_LLM_component_define_and_usage.md b/docs/guidebook/en/2_2_2_LLM_component_define_and_usage.md index 1d2f76b9..bf49694f 100644 --- a/docs/guidebook/en/2_2_2_LLM_component_define_and_usage.md +++ b/docs/guidebook/en/2_2_2_LLM_component_define_and_usage.md @@ -467,7 +467,7 @@ module: 'agentuniverse.llm.default.default_openai_llm' class: 'DefaultOpenAILLM' ``` -If we need to configure and define an LLM instance based on the `gpt-3.5-turbo model`, with a maximum token limit of 1000 and a retry count of 2 for failures, the configuration would be as follows: +If we need to configure and define an LLM instance based on the `gpt-3.5-turbo model`, with a maximum token limit of 1000 and a failure retry count of 2, the configuration would be as follows: ```yaml name: 'demo_llm' description: 'demo openai' diff --git "a/docs/guidebook/zh/2_2_2_\346\250\241\345\236\213\345\256\232\344\271\211\344\270\216\344\275\277\347\224\250.md" "b/docs/guidebook/zh/2_2_2_\346\250\241\345\236\213\345\256\232\344\271\211\344\270\216\344\275\277\347\224\250.md" index 77f90a8b..1d513e6e 100644 --- "a/docs/guidebook/zh/2_2_2_\346\250\241\345\236\213\345\256\232\344\271\211\344\270\216\344\275\277\347\224\250.md" +++ "b/docs/guidebook/zh/2_2_2_\346\250\241\345\236\213\345\256\232\344\271\211\344\270\216\344\275\277\347\224\250.md" @@ -462,7 +462,7 @@ module: 
'agentuniverse.llm.default.default_openai_llm' class: 'DefaultOpenAILLM' ``` -假如我们需要配置定义一个基于`gpt-3.5-turbo`模型,最大token限制为1000,失败重试次数未2次的LLM实例,其配置如下: +假如我们需要配置定义一个基于`gpt-3.5-turbo`模型,最大token限制为1000,失败重试次数为2次的LLM实例,其配置如下: ```yaml name: 'demo_llm' description: 'demo openai'