1 file changed: +2 −14 lines changed
Original file line number	Diff line number	Diff line change
13 13	  # limitations under the License.
14 14	
15 15	  import importlib.resources as resources
   16	+ import logging
16 17	  import os
17 18	  import sys
18 19	
19   	- import torch
20   	- from accelerate import logging
21 20	  from accelerate.commands.launch import launch_command, launch_command_parser
22 21	
23 22	  from .scripts.dpo import make_parser as make_dpo_parser
32 31	  from .scripts.vllm_serve import make_parser as make_vllm_serve_parser
33 32	
34 33	
35   	- logger = logging.get_logger(__name__)
   34	+ logger = logging.getLogger(__name__)
36 35	
3736
38 37	  def main():
@@ -144,17 +143,6 @@ def main():
144 143	
145 144	      elif args.command == "vllm-serve":
146 145	          (script_args,) = parser.parse_args_and_config()
147    	-
148    	-         # Known issue: Using DeepSpeed with tensor_parallel_size=1 and data_parallel_size>1 may cause a crash when
149    	-         # launched via the CLI. Suggest running the module directly.
150    	-         # More information: https://github.com/vllm-project/vllm/issues/17079
151    	-         if script_args.tensor_parallel_size == 1 and script_args.data_parallel_size > 1 and torch.cuda.is_available():
152    	-             logger.warning(
153    	-                 "Detected configuration: tensor_parallel_size=1 and data_parallel_size>1. This setup is known to "
154    	-                 "cause a crash when using the `trl vllm-serve` CLI entry point. As a workaround, please run the "
155    	-                 "server using the module path instead: `python -m trl.scripts.vllm_serve`",
156    	-             )
157    	-
158 146	          vllm_serve_main(script_args)
You can’t perform that action at this time.
0 commit comments