
Commit

fix ci
zhangshulai committed Feb 14, 2025
1 parent 1bf9e5a commit 8f2ffae
Showing 2 changed files with 6 additions and 6 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/vllm.yml
@@ -40,8 +40,8 @@ jobs:
         run: |
           cd tests/rollout
           torchrun --standalone --nnodes=1 --nproc_per_node=8 $(which pytest) -s test_vllm_hf_loader.py
-      - name: Test vllm==0.7.0
+      - name: Test the latest vLLM
         run: |
-          pip3 install vllm==0.7.0
+          pip3 install --upgrade vllm
           cd tests/rollout
-          torchrun --standalone --nnodes=1 --nproc_per_node=8 $(which pytest) -s test_vllm_spmd.py
+          torchrun --standalone --nnodes=1 --nproc_per_node=4 $(which pytest) -s test_vllm_spmd.py
6 changes: 3 additions & 3 deletions tests/rollout/test_vllm_spmd.py
@@ -80,7 +80,7 @@ def test_vllm_spmd():
     hdfs_path = 'Qwen/Qwen2-7B-Instruct'
     from verl.utils.fs import copy_local_path_from_hdfs
     local_model_path = copy_local_path_from_hdfs(src=hdfs_path, cache_dir=local_cache_path)
-    tokenizer = AutoTokenizer.from_pretrained(local_model_path)
+    tokenizer = AutoTokenizer.from_pretrained(local_model_path, padding_side='left')
 
     preencode_prompts = [
         "Who won the Champions League in 2019?",
@@ -111,14 +111,14 @@ def test_vllm_spmd():
                               ignore_eos=True)
 
     sampling_params = SamplingParams(**kwargs)
-    tensor_parallel_size = 8
+    tensor_parallel_size = 4
 
     llm = LLM(model=local_model_path,
               enable_sleep_mode=True,
               tensor_parallel_size=tensor_parallel_size,
               distributed_executor_backend="external_launcher",
               dtype='bfloat16',
-              gpu_memory_utilization=0.1)
+              gpu_memory_utilization=0.5)
 
     print('start generation')
     input_ids = input_ids.cuda()
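Note on the change (editor's sketch, not part of the commit): with `distributed_executor_backend="external_launcher"`, every process launched by torchrun constructs the same `LLM`, and vLLM reuses the launcher's process group for tensor parallelism, so `--nproc_per_node` in the workflow and `tensor_parallel_size` in the test must match; this commit moves both from 8 to 4. A minimal standalone sketch of that pattern follows, with an illustrative prompt and sampling settings that are not taken from the test:

```python
# Minimal sketch of SPMD-style vLLM inference: torchrun launches one process
# per GPU, and vLLM's "external_launcher" backend joins the existing process
# group instead of spawning its own workers.
import os

from vllm import LLM, SamplingParams


def main():
    # torchrun exports WORLD_SIZE; tensor_parallel_size must equal it when
    # vLLM attaches to the launcher's process group.
    world_size = int(os.environ.get("WORLD_SIZE", "1"))

    llm = LLM(
        model="Qwen/Qwen2-7B-Instruct",
        tensor_parallel_size=world_size,
        distributed_executor_backend="external_launcher",
        dtype="bfloat16",
        # The commit raises this from 0.1 to 0.5, plausibly to give the
        # KV cache more headroom on the CI GPUs.
        gpu_memory_utilization=0.5,
    )

    # Every rank issues the same generate() call; outputs agree across ranks.
    outputs = llm.generate(
        ["Who won the Champions League in 2019?"],
        SamplingParams(temperature=0.0, max_tokens=32),
    )
    print(outputs[0].outputs[0].text)


if __name__ == "__main__":
    main()
```

Run with something like `torchrun --standalone --nnodes=1 --nproc_per_node=4 spmd_sketch.py` (the filename is hypothetical); each rank prints the same completion.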
