File tree: 1 file changed, +3 −3 lines changed
Original file line number | Diff line number | Diff line change
@@ -77,7 +77,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
7777# can be useful for both `dev` and `test`
7878# explicitly set the list to avoid issues with torch 2.2
7979# see https://github.com/pytorch/pytorch/pull/123243
80- ARG torch_cuda_arch_list='7.0 7.5 8.0 8.6 8.9 9.0+PTX'
80+ ARG torch_cuda_arch_list='7.0 7.5 8.0 8.9 9.0 10.0+PTX'
8181ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
8282# Override the arch list for flash-attn to reduce the binary size
8383ARG vllm_fa_cmake_gpu_arches='80-real;90-real'
@@ -257,8 +257,8 @@ RUN --mount=type=cache,target=/root/.cache/uv \
257257if [ "$TARGETPLATFORM" != "linux/arm64" ]; then \
258258 # uv pip install --system https://github.com/flashinfer-ai/flashinfer/releases/download/v0.2.4/flashinfer_python-0.2.4+cu124torch2.6-cp38-abi3-linux_x86_64.whl ; \
259259 # TESTING: install FlashInfer from source to test 2.7.0 final RC
260- FLASHINFER_ENABLE_AOT=1 TORCH_CUDA_ARCH_LIST='7.5 8.0 8.6 8.9 9.0+PTX' \
261- uv pip install --system --no-build-isolation "git+https://github.com/flashinfer-ai/flashinfer@v0.2.4" ; \
260+ FLASHINFER_ENABLE_AOT=1 TORCH_CUDA_ARCH_LIST='7.5 8.0 8.9 9.0 10.0+PTX' \
261+ uv pip install --system --no-build-isolation "git+https://github.com/flashinfer-ai/flashinfer@e00e8cedbfcb220f328fd36aa8f529f869b01e6b" ; \
262262fi
263263COPY examples examples
264264COPY benchmarks benchmarks
You can’t perform that action at this time.
0 commit comments