Commit 7996da2
[Bugfix][CI/Build][Hardware][AMD] Install matching torchvision to fix AMD tests (vllm-project#5949)
mawong-amd authored and jimpang committed Jul 8, 2024
1 parent eb4a5cc commit 7996da2
Showing 2 changed files with 14 additions and 8 deletions.
18 changes: 12 additions & 6 deletions Dockerfile.rocm
```diff
@@ -55,16 +55,22 @@ RUN apt-get purge -y sccache; pip uninstall -y sccache; rm -f "$(which sccache)"
 # Install torch == 2.4.0 on ROCm
 RUN case "$(ls /opt | grep -Po 'rocm-[0-9]\.[0-9]')" in \
     *"rocm-5.7"*) \
-        pip uninstall -y torch \
-        && pip install --no-cache-dir --pre torch==2.4.0.dev20240612 \
+        pip uninstall -y torch torchaudio torchvision \
+        && pip install --no-cache-dir --pre \
+            torch==2.4.0.dev20240612 torchaudio==2.4.0.dev20240612 \
+            torchvision==0.19.0.dev20240612 \
             --index-url https://download.pytorch.org/whl/nightly/rocm5.7;; \
     *"rocm-6.0"*) \
-        pip uninstall -y torch \
-        && pip install --no-cache-dir --pre torch==2.4.0.dev20240612 \
+        pip uninstall -y torch torchaudio torchvision \
+        && pip install --no-cache-dir --pre \
+            torch==2.4.0.dev20240612 torchaudio==2.4.0.dev20240612 \
+            torchvision==0.19.0.dev20240612 \
             --index-url https://download.pytorch.org/whl/nightly/rocm6.0;; \
     *"rocm-6.1"*) \
-        pip uninstall -y torch \
-        && pip install --no-cache-dir --pre torch==2.4.0.dev20240612 \
+        pip uninstall -y torch torchaudio torchvision \
+        && pip install --no-cache-dir --pre \
+            torch==2.4.0.dev20240612 torchaudio==2.4.0.dev20240612 \
+            torchvision==0.19.0.dev20240612 \
             --index-url https://download.pytorch.org/whl/nightly/rocm6.1;; \
     *) ;; esac
```
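PyTorch nightly wheels for torch, torchaudio, and torchvision are generally only compatible when they come from the same nightly build, which is why the diff above pins all three to the same dev20240612 tag. A minimal sketch, assuming the packages from the Dockerfile are installed, that verifies the wheels in an image share one nightly date tag:

```python
# Sanity-check sketch (not part of the commit): confirm torch, torchaudio,
# and torchvision were installed from the same nightly build.
import re

import torch
import torchaudio
import torchvision


def nightly_tag(version: str) -> str:
    """Extract the 'devYYYYMMDD' tag from a version like '2.4.0.dev20240612+rocm6.1'."""
    match = re.search(r"dev\d{8}", version)
    return match.group(0) if match else "release"


tags = {
    "torch": nightly_tag(torch.__version__),
    "torchaudio": nightly_tag(torchaudio.__version__),
    "torchvision": nightly_tag(torchvision.__version__),
}
print(tags)
assert len(set(tags.values())) == 1, f"mismatched nightly builds: {tags}"
```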

4 changes: 2 additions & 2 deletions tests/entrypoints/test_openai_chat.py
```diff
@@ -14,7 +14,7 @@
 from huggingface_hub import snapshot_download
 from openai import BadRequestError
 
-from ..utils import VLLM_PATH, RemoteOpenAIServer
+from ..utils import RemoteOpenAIServer
 
 # any model with a chat template should work here
 MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
@@ -79,7 +79,7 @@ def zephyr_lora_files():
 
 @pytest.fixture(scope="module")
 def ray_ctx():
-    ray.init(runtime_env={"working_dir": VLLM_PATH})
+    ray.init()
     yield
     ray.shutdown()
```
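The test change drops the runtime_env working directory from ray.init(); without a runtime_env, Ray workers use the locally installed packages rather than an uploaded copy of the repository checkout. For illustration, a minimal sketch of how a module-scoped fixture like ray_ctx is consumed (the test body below is hypothetical, not from this commit):

```python
# Hypothetical consumer of a module-scoped Ray fixture (illustrative only).
import pytest
import ray


@pytest.fixture(scope="module")
def ray_ctx():
    ray.init()  # no runtime_env: workers use the locally installed packages
    yield
    ray.shutdown()


def test_ray_roundtrip(ray_ctx):
    @ray.remote
    def ping() -> str:
        return "pong"

    # The remote task runs on the module-scoped Ray instance.
    assert ray.get(ping.remote()) == "pong"
```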

