Skip to content

Commit 346701a

Browse files
authored
Replace accelerate logging with stdlib in CLI (#4512)
1 parent 4db63af commit 346701a

File tree

1 file changed

+2
-14
lines changed

1 file changed

+2
-14
lines changed

trl/cli.py

Lines changed: 2 additions & 14 deletions
Original file line number · Diff line number · Diff line change
@@ -13,11 +13,10 @@
1313
# limitations under the License.
1414

1515
import importlib.resources as resources
16+
import logging
1617
import os
1718
import sys
1819

19-
import torch
20-
from accelerate import logging
2120
from accelerate.commands.launch import launch_command, launch_command_parser
2221

2322
from .scripts.dpo import make_parser as make_dpo_parser
@@ -32,7 +31,7 @@
3231
from .scripts.vllm_serve import make_parser as make_vllm_serve_parser
3332

3433

35-
logger = logging.get_logger(__name__)
34+
logger = logging.getLogger(__name__)
3635

3736

3837
def main():
@@ -144,17 +143,6 @@ def main():
144143

145144
elif args.command == "vllm-serve":
146145
(script_args,) = parser.parse_args_and_config()
147-
148-
# Known issue: Using DeepSpeed with tensor_parallel_size=1 and data_parallel_size>1 may cause a crash when
149-
# launched via the CLI. Suggest running the module directly.
150-
# More information: https://github.com/vllm-project/vllm/issues/17079
151-
if script_args.tensor_parallel_size == 1 and script_args.data_parallel_size > 1 and torch.cuda.is_available():
152-
logger.warning(
153-
"Detected configuration: tensor_parallel_size=1 and data_parallel_size>1. This setup is known to "
154-
"cause a crash when using the `trl vllm-serve` CLI entry point. As a workaround, please run the "
155-
"server using the module path instead: `python -m trl.scripts.vllm_serve`",
156-
)
157-
158146
vllm_serve_main(script_args)
159147

160148

0 commit comments

Comments (0)