@@ -14,8 +14,10 @@ ARG PYTORCH_VISION_REPO="https://github.com/pytorch/vision.git"
1414ARG FA_BRANCH="1a7f4dfa"
1515ARG FA_REPO="https://github.com/Dao-AILab/flash-attention.git"
1616
17+ # if not using the submodule
1718ARG VLLM_BRANCH="v0.7.3+rocm"
18- ARG VLLM_VERSION=0.7.3
19+
20+ ARG VLLM_VERSION=0.8.1
1921ARG PYTORCH_ROCM_ARCH=gfx90a;gfx942
2022ARG VLLM_SOURCE=submodule
2123# or ARG VLLM_SOURCE=upstream
@@ -158,12 +160,17 @@ RUN cd pytorch && git checkout ${PYTORCH_BRANCH} && \
158160 && python3 tools/amd_build/build_amd.py \
159161 && CMAKE_PREFIX_PATH=$(python3 -c 'import sys; print(sys.prefix)') python3 setup.py bdist_wheel --dist-dir=dist \
160162 && pip install dist/*.whl
161- RUN git clone ${PYTORCH_VISION_REPO} vision
162- RUN cd vision && git checkout ${PYTORCH_VISION_BRANCH} \
163- && python3 setup.py bdist_wheel --dist-dir=dist \
164- && pip install dist/*.whl
165- RUN mkdir -p /workspace/install && cp /workspace/pytorch/dist/*.whl /workspace/install \
166- && cp /workspace/vision/dist/*.whl /workspace/install
163+ # RUN git clone ${PYTORCH_VISION_REPO} vision
164+ # RUN cd vision && git checkout ${PYTORCH_VISION_BRANCH} \
165+ # && python3 setup.py bdist_wheel --dist-dir=dist \
166+ # && pip install dist/*.whl
167+ # WORKDIR /workspace/vision/dist
168+ # RUN --mount=type=cache,target=/root/.cache/pip \
169+ # pip download torchvision==${PYTORCH_VISION_BRANCH} --no-deps
170+
171+ RUN mkdir -p /workspace/install && cp /workspace/pytorch/dist/*.whl /workspace/install
172+ # \
173+ # && cp /workspace/vision/dist/*.whl /workspace/install
167174
168175
169176## vLLM Builder #################################################################
@@ -242,7 +249,7 @@ ENV PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}
242249
243250# Build vLLM
244251RUN cd vllm \
245-	 && python3 -m pip install -r requirements-rocm.txt
252+	 && python3 -m pip install -r requirements/rocm.txt
246253
247254ENV CMAKE_PREFIX_PATH="/opt/rocm/;/opt/rocm/hip;$(python3 -c 'import sys; print(sys.prefix)')"
248255RUN cd vllm \
@@ -332,9 +339,13 @@ WORKDIR /workspace
332339#ENV PYTORCH_ROCM_ARCH="gfx90a;gfx942"
333340ARG PYTORCH_ROCM_ARCH
334341ENV PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}
335- COPY rocm_vllm/requirements-*.txt /workspace/
336- RUN python3 -m pip install -r requirements-rocm.txt \
337- && pip uninstall -y vllm
342+ COPY rocm_vllm/requirements/*.txt /workspace/
343+ RUN --mount=type=cache,target=/root/.cache/pip \
344+ --mount=type=cache,target=/root/.cache/uv \
345+ uv pip install -r rocm.txt \
346+ && pip uninstall -y vllm \
347+ # FIXME: remove once rocm requirements are updated again
348+ && uv pip install cachetools
338349# export PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} \
339350
340351# Install vllm
@@ -361,12 +372,14 @@ RUN --mount=type=cache,target=/root/.cache/pip \
361372 uv pip install /workspace/fa-install/*.whl
362373
363374# Install pytorch
375+ # ARG ROCM_VERSION
364376RUN mkdir -p /workspace/torch-install
365377COPY --from=pytorch-builder /workspace/install/*.whl /workspace/torch-install
366378RUN ls -al /workspace/torch-install/
367379RUN --mount=type=cache,target=/root/.cache/pip \
368380 --mount=type=cache,target=/root/.cache/uv \
369- uv pip install /workspace/torch-install/*.whl
381+ uv pip install /workspace/torch-install/*.whl && \
382+ uv pip install torchvision --no-deps --index-url https://download.pytorch.org/whl/rocm6.2.4
370383# install rocm pytorch
371384# RUN --mount=type=cache,target=/root/.cache/pip \
372385# --mount=type=cache,target=/root/.cache/uv \
@@ -387,7 +400,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
387400# ENV LD_LIBRARY_PATH="${VIRTUAL_ENV}/lib/python${PYTHON_VERSION}/site-packages/nvidia/cuda_cupti/lib:${LD_LIBRARY_PATH}"
388401
389402
390- # copy requirements before to avoid reinstall
403+ # copy requirements explicitly before to avoid reinstall
391404COPY triton-dejavu/requirements-opt.txt dejavu-requirements-opt.txt
392405RUN --mount=type=cache,target=/root/.cache/pip \
393406 --mount=type=cache,target=/root/.cache/uv \
@@ -418,16 +431,14 @@ RUN --mount=type=cache,target=/root/.cache/pip \
418431 --mount=type=cache,target=/root/.cache/uv \
419432 uv pip install pytest llnl-hatchet debugpy
420433
421-
422-
423434# install lm_eval
424435RUN --mount=type=cache,target=/root/.cache/pip \
425436 --mount=type=cache,target=/root/.cache/uv \
426437 git clone --depth 1 https://github.com/EleutherAI/lm-evaluation-harness && cd lm-evaluation-harness && uv pip install .
427438
428439# copy vllm benchmarks and download share GPT
429440COPY vllm/benchmarks benchmarks
430-	RUN wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json
441+ COPY ShareGPT_V3_unfiltered_cleaned_split.json ShareGPT_V3_unfiltered_cleaned_split.json
431442
432443ENV STORE_TEST_RESULT_PATH=/results
433444
0 commit comments