@@ -119,12 +119,13 @@ WORKDIR /workspace
119119
120120### NIXL SETUP ###
121121# Copy nixl source, and use commit hash as cache hint
122+ # TEMP: disable gds backend for arm64
122123COPY --from=nixl_base /opt/nixl /opt/nixl
123124COPY --from=nixl_base /opt/nixl/commit.txt /opt/nixl/commit.txt
124125RUN if [ "$ARCH" = "arm64" ]; then \
125126 cd /opt/nixl && \
126127 mkdir build && \
127- meson setup build/ --buildtype=release --prefix=/usr/local/nixl -Dgds_path=/usr/local/cuda/targets/sbsa-linux && \
128+ meson setup build/ --buildtype=release --prefix=/usr/local/nixl -Ddisable_gds_backend=true -Dgds_path=/usr/local/cuda/targets/sbsa-linux && \
128129 cd build/ && \
129130 ninja && \
130131 ninja install; \
@@ -163,8 +164,10 @@ ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
163164
164165# Install NIXL Python module
165166# TODO: Move gds_path selection based on arch into NIXL build
167+ # TEMP: disable gds backend for arm64
166168RUN if [ "$ARCH" = "arm64" ]; then \
167169 cd /opt/nixl && uv build . --out-dir /workspace/wheels/nixl \
170+ --config-settings=setup-args="-Ddisable_gds_backend=true" \
168171 --config-settings=setup-args="-Dgds_path=/usr/local/cuda/targets/sbsa-linux"; \
169172 else \
170173 cd /opt/nixl && uv build . --out-dir /workspace/wheels/nixl; \
@@ -177,22 +180,43 @@ RUN uv pip install /workspace/wheels/nixl/*.whl
177180# Install vllm - keep this early in Dockerfile to avoid
178181# rebuilds from unrelated source code changes
179182ARG VLLM_REF="059d4cd"
183+ ARG MAX_JOBS=16
184+ ENV MAX_JOBS=$MAX_JOBS
180185ENV CUDA_HOME=/usr/local/cuda
181186RUN --mount=type=bind,source=./container/deps/,target=/tmp/deps \
182187 --mount=type=cache,target=/root/.cache/uv \
183- uv pip install pip cuda-python && \
184- mkdir /opt/vllm && \
185- cd /opt/vllm && \
186- git clone https://github.com/vllm-project/vllm.git && \
187- cd vllm && \
188- git checkout $VLLM_REF && \
189- VLLM_USE_PRECOMPILED=1 uv pip install -e . && \
190- cd tools/ep_kernels && \
191- bash install_python_libraries.sh && \
192- cd ep_kernels_workspace && \
193- git clone --recursive https://github.com/deepseek-ai/DeepGEMM.git && \
194- cd DeepGEMM && \
195- python setup.py install
188+ if [ "$ARCH" = "arm64" ]; then \
189+ uv pip install pip cuda-python && \
190+ mkdir /opt/vllm && \
191+ cd /opt/vllm && \
192+ git clone https://github.com/vllm-project/vllm.git && \
193+ cd vllm && \
194+ git checkout $VLLM_REF && \
195+ uv pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu128 && \
196+ python use_existing_torch.py && \
197+ uv pip install -r requirements/build.txt && \
198+ MAX_JOBS=${MAX_JOBS} uv pip install --no-build-isolation -e . -v && \
199+ cd tools/ep_kernels && \
200+ bash install_python_libraries.sh && \
201+ cd ep_kernels_workspace && \
202+ git clone --recursive https://github.com/deepseek-ai/DeepGEMM.git && \
203+ cd DeepGEMM && \
204+ python setup.py install; \
205+ else \
206+ uv pip install pip cuda-python && \
207+ mkdir /opt/vllm && \
208+ cd /opt/vllm && \
209+ git clone https://github.com/vllm-project/vllm.git && \
210+ cd vllm && \
211+ git checkout $VLLM_REF && \
212+ VLLM_USE_PRECOMPILED=1 uv pip install -e . && \
213+ cd tools/ep_kernels && \
214+ bash install_python_libraries.sh && \
215+ cd ep_kernels_workspace && \
216+ git clone --recursive https://github.com/deepseek-ai/DeepGEMM.git && \
217+ cd DeepGEMM && \
218+ python setup.py install; \
219+ fi
196220
197221# Common dependencies
198222RUN --mount=type=bind,source=./container/deps/requirements.txt,target=/tmp/requirements.txt \
0 commit comments