From 947cb3050949c80efc5d739d690847334c85f069 Mon Sep 17 00:00:00 2001
From: Jintao
Date: Sat, 12 Oct 2024 14:24:53 +0800
Subject: [PATCH] fix deploy timeout (#2230)

---
 ...\221\275\344\273\244\350\241\214\345\217\202\346\225\260.md" | 2 +-
 docs/source_en/Instruction/Command-line-parameters.md           | 2 +-
 swift/llm/utils/argument.py                                     | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git "a/docs/source/Instruction/\345\221\275\344\273\244\350\241\214\345\217\202\346\225\260.md" "b/docs/source/Instruction/\345\221\275\344\273\244\350\241\214\345\217\202\346\225\260.md"
index ce405e17d..e89292cc6 100644
--- "a/docs/source/Instruction/\345\221\275\344\273\244\350\241\214\345\217\202\346\225\260.md"
+++ "b/docs/source/Instruction/\345\221\275\344\273\244\350\241\214\345\217\202\346\225\260.md"
@@ -411,7 +411,7 @@ eval参数继承了infer参数,除此之外增加了以下参数:(注意:
 - `--eval_output_dir`: 评测结果输出路径, 默认是当前文件夹下的`eval_outputs`路径.
 - `--eval_batch_size`: 评测的输入batch_size, 默认是8
 - `--eval_nproc`: 并发数, 更大的并发数可以更快评测, 但显存占用也更高, 默认值16. 本参数仅对多模态评测生效.
-- `--deploy_timeout`: 评测之前会启动模型部署, 该参数设置部署的等待超时时长, 默认值为60, 代表一分钟.
+- `--deploy_timeout`: 评测之前会启动模型部署, 该参数设置部署的等待超时时长, 默认值为`1800`, 代表30分钟.
 
 ## app-ui 参数
 
diff --git a/docs/source_en/Instruction/Command-line-parameters.md b/docs/source_en/Instruction/Command-line-parameters.md
index b58ee19ea..31a18c7c8 100644
--- a/docs/source_en/Instruction/Command-line-parameters.md
+++ b/docs/source_en/Instruction/Command-line-parameters.md
@@ -412,7 +412,7 @@ The eval parameters inherit from the infer parameters, and additionally include
 - `--eval_output_dir`: Output path for evaluation results, default is `eval_outputs` in the current folder.
 - `--eval_batch_size`: Input batch size for evaluation, default is 8.
 - `--eval_nproc`: Concurrent number, a bigger value means a faster evaluation and more cost of GPU memory, default 16. This only takes effects when running multi-modal evaluations.
-- `--deploy_timeout`: The timeout duration for waiting for model deployment before evaluation, default is 60, which means one minute.
+- `--deploy_timeout`: The timeout duration for waiting for model deployment before evaluation, default is `1800`, which means 30 minutes.
 
 ## app-ui Parameters
 
diff --git a/swift/llm/utils/argument.py b/swift/llm/utils/argument.py
index 35c1998b5..8c77f4d8d 100644
--- a/swift/llm/utils/argument.py
+++ b/swift/llm/utils/argument.py
@@ -1602,7 +1602,7 @@ class EvalArguments(InferArguments):
     eval_output_dir: str = 'eval_outputs'
     eval_backend: Literal['Native', 'OpenCompass'] = 'OpenCompass'
     eval_batch_size: int = 8
-    deploy_timeout: int = 60
+    deploy_timeout: int = 1800
     do_sample: bool = False  # Note: for evaluation default is False
     temperature: float = 0.
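
Note: to show what `deploy_timeout` bounds, below is a minimal, hypothetical sketch of the kind of readiness loop the evaluation path runs before sending requests: poll the freshly started deployment until it answers, or give up after `deploy_timeout` seconds (now 1800 by default). The function name, polling interval, and `/v1/models` endpoint are illustrative assumptions, not the actual implementation in `swift/llm`.

```python
import time

import requests  # assumed available; used only for the illustrative health check


def wait_for_deploy(base_url: str, deploy_timeout: int = 1800, poll_interval: float = 2.0) -> None:
    """Block until the deployed model answers, or raise after `deploy_timeout` seconds.

    Hypothetical helper for illustration; the real ms-swift code differs.
    """
    deadline = time.monotonic() + deploy_timeout
    while time.monotonic() < deadline:
        try:
            # /v1/models is a common OpenAI-compatible readiness endpoint; assumed here.
            if requests.get(f'{base_url}/v1/models', timeout=5).status_code == 200:
                return
        except requests.RequestException:
            pass  # server not up yet, keep polling
        time.sleep(poll_interval)
    raise TimeoutError(f'Deployment not ready after {deploy_timeout} seconds')
```

Per the documentation updated above, the wait can still be overridden from the command line, e.g. `swift eval ... --deploy_timeout 3600` for models that need even longer than 30 minutes to load.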