diff --git a/.devcontainer/post_create_command.sh b/.devcontainer/post_create_command.sh
index b0322dd2b2da59..e80f9d30aadc02 100755
--- a/.devcontainer/post_create_command.sh
+++ b/.devcontainer/post_create_command.sh
@@ -7,5 +7,6 @@ echo 'alias start-api="cd /workspaces/dify/api && poetry run python -m flask run
 echo 'alias start-worker="cd /workspaces/dify/api && poetry run python -m celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion"' >> ~/.bashrc
 echo 'alias start-web="cd /workspaces/dify/web && npm run dev"' >> ~/.bashrc
 echo 'alias start-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify up -d"' >> ~/.bashrc
+echo 'alias stop-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify down"' >> ~/.bashrc
-source /home/vscode/.bashrc
\ No newline at end of file
+source /home/vscode/.bashrc
diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml
index 282afefe74243a..b5e63a8870baa8 100644
--- a/.github/workflows/style.yml
+++ b/.github/workflows/style.yml
@@ -37,6 +37,7 @@ jobs:
       - name: Ruff check
         if: steps.changed-files.outputs.any_changed == 'true'
         run: |
+          poetry run -C api ruff --version
           poetry run -C api ruff check ./api
           poetry run -C api ruff format --check ./api
diff --git a/api/constants/__init__.py b/api/constants/__init__.py
index 05795e11d7dcc5..4500ef4306fc2a 100644
--- a/api/constants/__init__.py
+++ b/api/constants/__init__.py
@@ -14,11 +14,11 @@
 if dify_config.ETL_TYPE == "Unstructured":
-    DOCUMENT_EXTENSIONS = ["txt", "markdown", "md", "pdf", "html", "htm", "xlsx", "xls"]
+    DOCUMENT_EXTENSIONS = ["txt", "markdown", "md", "mdx", "pdf", "html", "htm", "xlsx", "xls"]
     DOCUMENT_EXTENSIONS.extend(("docx", "csv", "eml", "msg", "pptx", "xml", "epub"))
     if dify_config.UNSTRUCTURED_API_URL:
         DOCUMENT_EXTENSIONS.append("ppt")
     DOCUMENT_EXTENSIONS.extend([ext.upper() for ext in DOCUMENT_EXTENSIONS])
 else:
-    DOCUMENT_EXTENSIONS = ["txt", "markdown", "md", "pdf", "html", "htm", "xlsx", "xls", "docx", "csv"]
+    DOCUMENT_EXTENSIONS = ["txt", "markdown", "md", "mdx", "pdf", "html", "htm", "xlsx", "xls", "docx", "csv"]
     DOCUMENT_EXTENSIONS.extend([ext.upper() for ext in DOCUMENT_EXTENSIONS])
diff --git a/api/core/app/features/rate_limiting/rate_limit.py b/api/core/app/features/rate_limiting/rate_limit.py
index 154a49ebda2b88..8fe1d96b37be0c 100644
--- a/api/core/app/features/rate_limiting/rate_limit.py
+++ b/api/core/app/features/rate_limiting/rate_limit.py
@@ -110,7 +110,7 @@ def __next__(self):
             raise StopIteration
         try:
             return next(self.generator)
-        except StopIteration:
+        except Exception:
             self.close()
             raise
diff --git a/api/core/model_runtime/model_providers/bedrock/get_bedrock_client.py b/api/core/model_runtime/model_providers/bedrock/get_bedrock_client.py
new file mode 100644
index 00000000000000..a19ffbb20a6a9e
--- /dev/null
+++ b/api/core/model_runtime/model_providers/bedrock/get_bedrock_client.py
@@ -0,0 +1,21 @@
+import boto3
+from botocore.config import Config
+
+
+def get_bedrock_client(service_name, credentials):
+    client_config = Config(region_name=credentials["aws_region"])
+    aws_access_key_id = credentials.get("aws_access_key_id")
+    aws_secret_access_key = credentials.get("aws_secret_access_key")
+    if aws_access_key_id and aws_secret_access_key:
+        # an explicit access key / secret key pair was provided, use it to call Bedrock
+        client = boto3.client(
+            service_name=service_name,
+            config=client_config,
+            aws_access_key_id=aws_access_key_id,
+            aws_secret_access_key=aws_secret_access_key,
+        )
+    else:
+        # no access key / secret key configured, fall back to the IAM role attached to the runtime
+        client = boto3.client(service_name=service_name, config=client_config)
+
+    return client
diff --git a/api/core/model_runtime/model_providers/bedrock/llm/llm.py b/api/core/model_runtime/model_providers/bedrock/llm/llm.py
index e6e8a765ee9e05..75ed7ad62404cb 100644
--- a/api/core/model_runtime/model_providers/bedrock/llm/llm.py
+++ b/api/core/model_runtime/model_providers/bedrock/llm/llm.py
@@ -40,6 +40,7 @@
 )
 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
+from core.model_runtime.model_providers.bedrock.get_bedrock_client import get_bedrock_client
 
 logger = logging.getLogger(__name__)
 
 ANTHROPIC_BLOCK_MODE_PROMPT = """You should always follow the instructions and output a valid {{block}} object.
@@ -173,13 +174,7 @@ def _generate_with_converse(
         :param stream: is stream response
         :return: full response or stream response chunk generator result
         """
-        bedrock_client = boto3.client(
-            service_name="bedrock-runtime",
-            aws_access_key_id=credentials.get("aws_access_key_id"),
-            aws_secret_access_key=credentials.get("aws_secret_access_key"),
-            region_name=credentials["aws_region"],
-        )
-
+        bedrock_client = get_bedrock_client("bedrock-runtime", credentials)
         system, prompt_message_dicts = self._convert_converse_prompt_messages(prompt_messages)
         inference_config, additional_model_fields = self._convert_converse_api_model_parameters(model_parameters, stop)
diff --git a/api/core/model_runtime/model_providers/bedrock/rerank/rerank.py b/api/core/model_runtime/model_providers/bedrock/rerank/rerank.py
index 397f65e8c960c8..e134db646f3d39 100644
--- a/api/core/model_runtime/model_providers/bedrock/rerank/rerank.py
+++ b/api/core/model_runtime/model_providers/bedrock/rerank/rerank.py
@@ -1,8 +1,5 @@
 from typing import Optional
 
-import boto3
-from botocore.config import Config
-
 from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult
 from core.model_runtime.errors.invoke import (
     InvokeAuthorizationError,
@@ -14,6 +11,7 @@
 )
 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.__base.rerank_model import RerankModel
+from core.model_runtime.model_providers.bedrock.get_bedrock_client import get_bedrock_client
 
 
 class BedrockRerankModel(RerankModel):
@@ -48,13 +46,7 @@ def _invoke(
             return RerankResult(model=model, docs=docs)
 
         # initialize client
-        client_config = Config(region_name=credentials["aws_region"])
-        bedrock_runtime = boto3.client(
-            service_name="bedrock-agent-runtime",
-            config=client_config,
-            aws_access_key_id=credentials.get("aws_access_key_id", ""),
-            aws_secret_access_key=credentials.get("aws_secret_access_key"),
-        )
+        bedrock_runtime = get_bedrock_client("bedrock-agent-runtime", credentials)
         queries = [{"type": "TEXT", "textQuery": {"text": query}}]
         text_sources = []
         for text in docs:
diff --git a/api/core/model_runtime/model_providers/bedrock/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/bedrock/text_embedding/text_embedding.py
index 2f998d8bdaee90..5505797f7658b3 100644
--- a/api/core/model_runtime/model_providers/bedrock/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/bedrock/text_embedding/text_embedding.py
@@ -3,8 +3,6 @@ import time
 from typing import Optional
 
-import boto3
-from botocore.config import Config
 from botocore.exceptions import (
     ClientError,
     EndpointConnectionError,
@@ -25,6 +23,7 @@
     InvokeServerUnavailableError,
 )
 from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
+from core.model_runtime.model_providers.bedrock.get_bedrock_client import get_bedrock_client
 
 logger = logging.getLogger(__name__)
 
@@ -48,14 +47,7 @@ def _invoke(
         :param input_type: input type
         :return: embeddings result
         """
-        client_config = Config(region_name=credentials["aws_region"])
-
-        bedrock_runtime = boto3.client(
-            service_name="bedrock-runtime",
-            config=client_config,
-            aws_access_key_id=credentials.get("aws_access_key_id"),
-            aws_secret_access_key=credentials.get("aws_secret_access_key"),
-        )
+        bedrock_runtime = get_bedrock_client("bedrock-runtime", credentials)
 
         embeddings = []
         token_usage = 0
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-2.0-flash-exp.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-2.0-flash-exp.yaml
new file mode 100644
index 00000000000000..bcd59623a78e43
--- /dev/null
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-2.0-flash-exp.yaml
@@ -0,0 +1,39 @@
+model: gemini-2.0-flash-exp
+label:
+  en_US: Gemini 2.0 Flash Exp
+model_type: llm
+features:
+  - agent-thought
+  - vision
+  - tool-call
+  - stream-tool-call
+  - document
+model_properties:
+  mode: chat
+  context_size: 1048576
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_output_tokens
+    use_template: max_tokens
+    default: 8192
+    min: 1
+    max: 8192
+  - name: json_schema
+    use_template: json_schema
+pricing:
+  input: '0.00'
+  output: '0.00'
+  unit: '0.000001'
+  currency: USD
diff --git a/api/core/model_runtime/model_providers/groq/llm/_position.yaml b/api/core/model_runtime/model_providers/groq/llm/_position.yaml
index 0613b19f87ee5e..279c1bcbe5ae92 100644
--- a/api/core/model_runtime/model_providers/groq/llm/_position.yaml
+++ b/api/core/model_runtime/model_providers/groq/llm/_position.yaml
@@ -1,4 +1,5 @@
 - llama-3.1-405b-reasoning
+- llama-3.3-70b-versatile
 - llama-3.1-70b-versatile
 - llama-3.1-8b-instant
 - llama3-70b-8192
diff --git a/api/core/model_runtime/model_providers/groq/llm/gemma-7b-it.yaml b/api/core/model_runtime/model_providers/groq/llm/gemma-7b-it.yaml
new file mode 100644
index 00000000000000..02f84e95f6e348
--- /dev/null
+++ b/api/core/model_runtime/model_providers/groq/llm/gemma-7b-it.yaml
@@ -0,0 +1,25 @@
+model: gemma-7b-it
+label:
+  zh_Hans: Gemma 7B Instruction Tuned
+  en_US: Gemma 7B Instruction Tuned
+model_type: llm
+features:
+  - agent-thought
+model_properties:
+  mode: chat
+  context_size: 8192
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 8192
+pricing:
+  input: '0.05'
+  output: '0.1'
+  unit: '0.000001'
+  currency: USD
diff --git a/api/core/model_runtime/model_providers/groq/llm/gemma2-9b-it.yaml b/api/core/model_runtime/model_providers/groq/llm/gemma2-9b-it.yaml
new file mode 100644
index 00000000000000..dad496f668ab94
--- /dev/null
+++ b/api/core/model_runtime/model_providers/groq/llm/gemma2-9b-it.yaml
@@ -0,0 +1,25 @@
+model: gemma2-9b-it
+label:
+  zh_Hans: Gemma 2 9B Instruction Tuned
+  en_US: Gemma 2 9B Instruction Tuned
+model_type: llm
+features:
+  - agent-thought
+model_properties:
+  mode: chat
+  context_size: 8192
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 8192
+pricing:
+  input: '0.05'
+  output: '0.1'
+  unit: '0.000001'
+  currency: USD
diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.1-70b-versatile.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.1-70b-versatile.yaml
index ab5f6ab05efe31..01323a1b8a74f4 100644
--- a/api/core/model_runtime/model_providers/groq/llm/llama-3.1-70b-versatile.yaml
+++ b/api/core/model_runtime/model_providers/groq/llm/llama-3.1-70b-versatile.yaml
@@ -1,7 +1,8 @@
 model: llama-3.1-70b-versatile
+deprecated: true
 label:
-  zh_Hans: Llama-3.1-70b-versatile
-  en_US: Llama-3.1-70b-versatile
+  zh_Hans: Llama-3.1-70b-versatile (DEPRECATED)
+  en_US: Llama-3.1-70b-versatile (DEPRECATED)
 model_type: llm
 features:
   - agent-thought
diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-11b-text-preview.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-11b-text-preview.yaml
index 019d45372361d3..3f30d81ae4e26c 100644
--- a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-11b-text-preview.yaml
+++ b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-11b-text-preview.yaml
@@ -1,4 +1,5 @@
 model: llama-3.2-11b-text-preview
+deprecated: true
 label:
   zh_Hans: Llama 3.2 11B Text (Preview)
   en_US: Llama 3.2 11B Text (Preview)
diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-90b-text-preview.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-90b-text-preview.yaml
index 3b34e7c07996bd..0391a7c890cec4 100644
--- a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-90b-text-preview.yaml
+++ b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-90b-text-preview.yaml
@@ -1,4 +1,5 @@
 model: llama-3.2-90b-text-preview
+deprecated: true
 label:
   zh_Hans: Llama 3.2 90B Text (Preview)
   en_US: Llama 3.2 90B Text (Preview)
diff --git a/api/core/model_runtime/model_providers/groq/llm/llama3-groq-70b-8192-tool-use-preview.yaml b/api/core/model_runtime/model_providers/groq/llm/llama3-groq-70b-8192-tool-use-preview.yaml
new file mode 100644
index 00000000000000..32ccbf1f4db29b
--- /dev/null
+++ b/api/core/model_runtime/model_providers/groq/llm/llama3-groq-70b-8192-tool-use-preview.yaml
@@ -0,0 +1,25 @@
+model: llama3-groq-70b-8192-tool-use-preview
+label:
+  zh_Hans: Llama3-groq-70b-8192-tool-use (PREVIEW)
+  en_US: Llama3-groq-70b-8192-tool-use (PREVIEW)
+model_type: llm
+features:
+  - agent-thought
+model_properties:
+  mode: chat
+  context_size: 8192
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 8192
+pricing:
+  input: '0.05'
+  output: '0.08'
+  unit: '0.000001'
+  currency: USD
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-exp.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-exp.yaml
new file mode 100644
index 00000000000000..bcd59623a78e43
--- /dev/null
+++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-2.0-flash-exp.yaml
@@ -0,0 +1,39 @@
+model: gemini-2.0-flash-exp
+label:
+  en_US: Gemini 2.0 Flash Exp
+model_type: llm
+features:
+  - agent-thought
+  - vision
+  - tool-call
+  - stream-tool-call
+  - document
+model_properties:
+  mode: chat
+  context_size: 1048576
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: max_output_tokens
+    use_template: max_tokens
+    default: 8192
+    min: 1
+    max: 8192
+  - name: json_schema
+    use_template: json_schema
+pricing:
+  input: '0.00'
+  output: '0.00'
+  unit: '0.000001'
+  currency: USD
diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py
index 0cba40c51a0d19..9bfd9f1dc8e773 100644
--- a/api/core/ops/langfuse_trace/langfuse_trace.py
+++ b/api/core/ops/langfuse_trace/langfuse_trace.py
@@ -65,8 +65,11 @@ def trace(self, trace_info: BaseTraceInfo):
             self.generate_name_trace(trace_info)
 
     def workflow_trace(self, trace_info: WorkflowTraceInfo):
-        trace_id = trace_info.workflow_app_log_id or trace_info.workflow_run_id
+        trace_id = trace_info.workflow_run_id
         user_id = trace_info.metadata.get("user_id")
+        metadata = trace_info.metadata
+        metadata["workflow_app_log_id"] = trace_info.workflow_app_log_id
+
         if trace_info.message_id:
             trace_id = trace_info.message_id
             name = TraceTaskName.MESSAGE_TRACE.value
@@ -76,7 +79,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
             name=name,
             input=trace_info.workflow_run_inputs,
             output=trace_info.workflow_run_outputs,
-            metadata=trace_info.metadata,
+            metadata=metadata,
             session_id=trace_info.conversation_id,
             tags=["message", "workflow"],
            created_at=trace_info.start_time,
@@ -84,14 +87,14 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
             updated_at=trace_info.end_time,
         )
         self.add_trace(langfuse_trace_data=trace_data)
         workflow_span_data = LangfuseSpan(
-            id=(trace_info.workflow_app_log_id or trace_info.workflow_run_id),
+            id=trace_info.workflow_run_id,
             name=TraceTaskName.WORKFLOW_TRACE.value,
             input=trace_info.workflow_run_inputs,
             output=trace_info.workflow_run_outputs,
             trace_id=trace_id,
             start_time=trace_info.start_time,
             end_time=trace_info.end_time,
-            metadata=trace_info.metadata,
+            metadata=metadata,
             level=LevelEnum.DEFAULT if trace_info.error == "" else LevelEnum.ERROR,
             status_message=trace_info.error or "",
         )
@@ -103,7 +106,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
             name=TraceTaskName.WORKFLOW_TRACE.value,
             input=trace_info.workflow_run_inputs,
             output=trace_info.workflow_run_outputs,
-            metadata=trace_info.metadata,
+            metadata=metadata,
             session_id=trace_info.conversation_id,
             tags=["workflow"],
         )
@@ -192,7 +195,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
                     metadata=metadata,
                     level=(LevelEnum.DEFAULT if status == "succeeded" else LevelEnum.ERROR),
                     status_message=trace_info.error or "",
-                    parent_observation_id=(trace_info.workflow_app_log_id or trace_info.workflow_run_id),
+                    parent_observation_id=trace_info.workflow_run_id,
                 )
             else:
                 span_data = LangfuseSpan(
diff --git a/api/core/ops/langsmith_trace/langsmith_trace.py b/api/core/ops/langsmith_trace/langsmith_trace.py
index c15b132abd30a2..672843e5a8f986 100644
--- a/api/core/ops/langsmith_trace/langsmith_trace.py
+++ b/api/core/ops/langsmith_trace/langsmith_trace.py
@@ -62,15 +62,17 @@ def trace(self, trace_info: BaseTraceInfo):
             self.generate_name_trace(trace_info)
 
     def workflow_trace(self, trace_info: WorkflowTraceInfo):
-        trace_id = trace_info.message_id or trace_info.workflow_app_log_id or trace_info.workflow_run_id
+        trace_id = trace_info.message_id or trace_info.workflow_run_id
         message_dotted_order = (
             generate_dotted_order(trace_info.message_id, trace_info.start_time) if trace_info.message_id else None
         )
         workflow_dotted_order = generate_dotted_order(
-            trace_info.workflow_app_log_id or trace_info.workflow_run_id,
+            trace_info.workflow_run_id,
             trace_info.workflow_data.created_at,
             message_dotted_order,
         )
+        metadata = trace_info.metadata
+        metadata["workflow_app_log_id"] = trace_info.workflow_app_log_id
 
         if trace_info.message_id:
             message_run = LangSmithRunModel(
@@ -82,7 +84,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
                 start_time=trace_info.start_time,
                 end_time=trace_info.end_time,
                 extra={
-                    "metadata": trace_info.metadata,
+                    "metadata": metadata,
                 },
                 tags=["message", "workflow"],
                 error=trace_info.error,
@@ -94,7 +96,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
         langsmith_run = LangSmithRunModel(
             file_list=trace_info.file_list,
             total_tokens=trace_info.total_tokens,
-            id=trace_info.workflow_app_log_id or trace_info.workflow_run_id,
+            id=trace_info.workflow_run_id,
             name=TraceTaskName.WORKFLOW_TRACE.value,
             inputs=trace_info.workflow_run_inputs,
             run_type=LangSmithRunType.tool,
@@ -102,7 +104,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
             end_time=trace_info.workflow_data.finished_at,
             outputs=trace_info.workflow_run_outputs,
             extra={
-                "metadata": trace_info.metadata,
+                "metadata": metadata,
             },
             error=trace_info.error,
             tags=["workflow"],
@@ -204,7 +206,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
                 extra={
                     "metadata": metadata,
                 },
-                parent_run_id=trace_info.workflow_app_log_id or trace_info.workflow_run_id,
+                parent_run_id=trace_info.workflow_run_id,
                 tags=["node_execution"],
                 id=node_execution_id,
                 trace_id=trace_id,
diff --git a/api/core/rag/extractor/extract_processor.py b/api/core/rag/extractor/extract_processor.py
index a0b1aa4cefbd1f..69659e31080da6 100644
--- a/api/core/rag/extractor/extract_processor.py
+++ b/api/core/rag/extractor/extract_processor.py
@@ -103,7 +103,7 @@ def extract(
                     extractor = ExcelExtractor(file_path)
                 elif file_extension == ".pdf":
                     extractor = PdfExtractor(file_path)
-                elif file_extension in {".md", ".markdown"}:
+                elif file_extension in {".md", ".markdown", ".mdx"}:
                     extractor = (
                         UnstructuredMarkdownExtractor(file_path, unstructured_api_url, unstructured_api_key)
                         if is_automatic
@@ -141,7 +141,7 @@ def extract(
                     extractor = ExcelExtractor(file_path)
                 elif file_extension == ".pdf":
                     extractor = PdfExtractor(file_path)
-                elif file_extension in {".md", ".markdown"}:
+                elif file_extension in {".md", ".markdown", ".mdx"}:
                     extractor = MarkdownExtractor(file_path, autodetect_encoding=True)
                 elif file_extension in {".htm", ".html"}:
                     extractor = HtmlExtractor(file_path)
diff --git a/api/extensions/storage/opendal_storage.py b/api/extensions/storage/opendal_storage.py
index ec6c38632cfaa9..dc71839c707e4b 100644
--- a/api/extensions/storage/opendal_storage.py
+++ b/api/extensions/storage/opendal_storage.py
@@ -58,8 +58,14 @@ def download(self, filename: str, target_filepath: str):
         with Path(target_filepath).open("wb") as f:
             f.write(self.op.read(path=filename))
 
-    def exists(self, filename: str):
-        return self.op.stat(path=filename).mode.is_file()
+    def exists(self, filename: str) -> bool:
+        # FIXME: this is a workaround because the OpenDAL Python binding does not yet expose an
+        # `exists` method, and no finer-grained error handling is possible here; switch to
+        # `exists` once it lands. See https://github.com/apache/opendal/blob/main/bindings/python/src/operator.rs
+        try:
+            return self.op.stat(path=filename).mode.is_file()
+        except Exception:
+            return False
 
     def delete(self, filename: str):
         if self.exists(filename):
diff --git a/api/services/app_generate_service.py b/api/services/app_generate_service.py
index 545de8190cb51e..9def7d15e928d4 100644
--- a/api/services/app_generate_service.py
+++ b/api/services/app_generate_service.py
@@ -108,6 +108,9 @@ def generate(
                 raise ValueError(f"Invalid app mode {app_model.mode}")
         except RateLimitError as e:
             raise InvokeRateLimitError(str(e))
+        except Exception:
+            rate_limit.exit(request_id)
+            raise
         finally:
             if not streaming:
                 rate_limit.exit(request_id)
diff --git a/api/tests/unit_tests/oss/opendal/test_opendal.py b/api/tests/unit_tests/oss/opendal/test_opendal.py
index 0a6eb4275b2195..78420927c2247e 100644
--- a/api/tests/unit_tests/oss/opendal/test_opendal.py
+++ b/api/tests/unit_tests/oss/opendal/test_opendal.py
@@ -1,15 +1,20 @@
+import os
+from collections.abc import Generator
+from pathlib import Path
+
 import pytest
 
 from configs.middleware.storage.opendal_storage_config import OpenDALScheme
 from extensions.storage.opendal_storage import OpenDALStorage
 from tests.unit_tests.oss.__mock.base import (
-    BaseStorageTest,
+    get_example_data,
+    get_example_filename,
+    get_example_filepath,
     get_example_folder,
 )
-from tests.unit_tests.oss.__mock.local import setup_local_fs_mock
 
-class TestOpenDAL(BaseStorageTest):
+class TestOpenDAL:
     @pytest.fixture(autouse=True)
     def setup_method(self, *args, **kwargs):
         """Executed before each test method."""
@@ -17,3 +22,68 @@ def setup_method(self, *args, **kwargs):
             scheme=OpenDALScheme.FS,
             root=get_example_folder(),
         )
+
+    def teardown_method(self, method):
+        """Clean up after each test method."""
+        try:
+            if self.storage.exists(get_example_filename()):
+                self.storage.delete(get_example_filename())
+
+            filepath = Path(get_example_filepath())
+            if filepath.exists():
+                filepath.unlink()
+        except Exception:
+            pass
+
+    def test_save_and_exists(self):
+        """Test saving data and checking existence."""
+        filename = get_example_filename()
+        data = get_example_data()
+
+        assert not self.storage.exists(filename)
+        self.storage.save(filename, data)
+        assert self.storage.exists(filename)
+
+    def test_load_once(self):
+        """Test loading data once."""
+        filename = get_example_filename()
+        data = get_example_data()
+
+        self.storage.save(filename, data)
+        loaded_data = self.storage.load_once(filename)
+        assert loaded_data == data
+
+    def test_load_stream(self):
+        """Test loading data as a stream."""
+        filename = get_example_filename()
+        data = get_example_data()
+
+        self.storage.save(filename, data)
+        generator = self.storage.load_stream(filename)
+        assert isinstance(generator, Generator)
+        assert next(generator) == data
+
+    def test_download(self):
+        """Test downloading data to a file."""
+        filename = get_example_filename()
+        filepath = get_example_filepath()
+        data = get_example_data()
+
+        self.storage.save(filename, data)
+        self.storage.download(filename, filepath)
+
+        downloaded_path = Path(filepath)
+        assert downloaded_path.exists()
+        downloaded_data = downloaded_path.read_bytes()
+        assert downloaded_data == data
+
+    def test_delete(self):
+        """Test deleting a file."""
+        filename = get_example_filename()
+        data = get_example_data()
+
+        self.storage.save(filename, data)
+        assert self.storage.exists(filename)
+
+        self.storage.delete(filename)
+        assert not self.storage.exists(filename)
diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout.tsx
index 96ee874d53caff..7a5347c7d54eed 100644
--- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout.tsx
+++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout.tsx
@@ -141,7 +141,7 @@ const AppDetailLayout: FC = (props) => {
   if (!appDetail) {
     return (
-
+
     )
@@ -152,7 +152,7 @@ const AppDetailLayout: FC = (props) => {
       {appDetail && (
       )}
-
+
         {children}
diff --git a/web/app/components/app/annotation/add-annotation-modal/edit-item/index.tsx b/web/app/components/app/annotation/add-annotation-modal/edit-item/index.tsx
index 4da6b7cac4d0b4..032e4b83576adf 100644
--- a/web/app/components/app/annotation/add-annotation-modal/edit-item/index.tsx
+++ b/web/app/components/app/annotation/add-annotation-modal/edit-item/index.tsx
@@ -2,7 +2,7 @@ import type { FC } from 'react'
 import React from 'react'
 import { useTranslation } from 'react-i18next'
-import Textarea from 'rc-textarea'
+import Textarea from '@/app/components/base/textarea'
 import { Robot, User } from '@/app/components/base/icons/src/public/avatar'
 
 export enum EditItemType {
@@ -31,12 +31,10 @@ const EditItem: FC = ({
       {avatar}
-
       {name}
+
       {name}