From 2f31014465cbcf88961eec2094bd9e25c44e2dea Mon Sep 17 00:00:00 2001
From: "jiang1.li"
Date: Wed, 25 Jun 2025 09:50:45 +0000
Subject: [PATCH 1/3] downgrade

Signed-off-by: jiang1.li
---
 csrc/cpu/torch_bindings.cpp                   | 13 +++++---
 docker/Dockerfile.cpu                         | 33 +++++++++++--------
 requirements/cpu-build.txt                    | 12 +++++++
 requirements/cpu.txt                          |  5 +--
 .../multimodal/generation/test_common.py      |  1 +
 .../generation/vlm_utils/builders.py          |  3 ++
 vllm/model_executor/layers/fused_moe/layer.py |  2 ++
 .../layers/quantization/ipex_quant.py         |  2 +-
 8 files changed, 51 insertions(+), 20 deletions(-)
 create mode 100644 requirements/cpu-build.txt

diff --git a/csrc/cpu/torch_bindings.cpp b/csrc/cpu/torch_bindings.cpp
index 447e826bc1c0..60304d229a8f 100644
--- a/csrc/cpu/torch_bindings.cpp
+++ b/csrc/cpu/torch_bindings.cpp
@@ -131,16 +131,19 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
 
   // Quantization
 #ifdef __AVX512F__
+  at::Tag stride_tag = at::Tag::needs_fixed_stride_order;
   // Compute int8 quantized tensor for given scaling factor.
   ops.def(
       "static_scaled_int8_quant(Tensor! out, Tensor input, Tensor scale,"
-      "Tensor? azp) -> ()");
+      "Tensor? azp) -> ()",
+      {stride_tag});
   ops.impl("static_scaled_int8_quant", torch::kCPU, &static_scaled_int8_quant);
 
   // Compute int8 quantized tensor and scaling factor
   ops.def(
       "dynamic_scaled_int8_quant(Tensor! out, Tensor input, Tensor! scale, "
-      "Tensor!? azp) -> ()");
+      "Tensor!? azp) -> ()",
+      {stride_tag});
   ops.impl("dynamic_scaled_int8_quant", torch::kCPU,
            &dynamic_scaled_int8_quant);
   // W8A8 GEMM, supporting symmetric per-tensor or per-row/column
@@ -148,7 +151,8 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
   ops.def(
       "cutlass_scaled_mm(Tensor! out, Tensor a,"
       " Tensor b, Tensor a_scales,"
-      " Tensor b_scales, Tensor? bias) -> ()");
+      " Tensor b_scales, Tensor? bias) -> ()",
+      {stride_tag});
   ops.impl("cutlass_scaled_mm", torch::kCPU, &int8_scaled_mm);
   // w8a8 GEMM, supporting asymmetric per-tensor or per-row/column
   // quantization.
@@ -156,7 +160,8 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
       "cutlass_scaled_mm_azp(Tensor! out, Tensor a,"
       " Tensor b, Tensor a_scales,"
       " Tensor b_scales, Tensor azp_adj,"
-      " Tensor? azp, Tensor? bias) -> ()");
+      " Tensor? azp, Tensor? bias) -> ()",
+      {stride_tag});
   ops.impl("cutlass_scaled_mm_azp", torch::kCPU, &int8_scaled_mm_azp);
 #elif defined(__powerpc64__)
   // Compute int8 quantized tensor for given scaling factor.
diff --git a/docker/Dockerfile.cpu b/docker/Dockerfile.cpu
index 3e9fa0e7af2d..13bd03c5696a 100644
--- a/docker/Dockerfile.cpu
+++ b/docker/Dockerfile.cpu
@@ -66,7 +66,7 @@ ENV VLLM_CPU_DISABLE_AVX512=${VLLM_CPU_DISABLE_AVX512}
 WORKDIR /workspace/vllm
 
 RUN --mount=type=cache,target=/root/.cache/uv \
-    --mount=type=bind,src=requirements/build.txt,target=requirements/build.txt \
+    --mount=type=bind,src=requirements/cpu-build.txt,target=requirements/build.txt \
     uv pip install -r requirements/build.txt
 
 COPY . .
@@ -79,6 +79,22 @@ RUN --mount=type=cache,target=/root/.cache/uv \
     --mount=type=bind,source=.git,target=.git \
     VLLM_TARGET_DEVICE=cpu python3 setup.py bdist_wheel
 
+######################### TEST DEPS #########################
+FROM base AS vllm-test-deps
+
+WORKDIR /workspace/vllm
+
+RUN --mount=type=bind,src=requirements/test.in,target=requirements/test.in \
+    cp requirements/test.in requirements/cpu-test.in && \
+    sed -i '/mamba_ssm/d' requirements/cpu-test.in && \
+    sed -i 's/torch==.*/torch==2.6.0/g' requirements/cpu-test.in && \
+    sed -i 's/torchaudio.*/torchaudio/g' requirements/cpu-test.in && \
+    sed -i 's/torchvision.*/torchvision/g' requirements/cpu-test.in && \
+    uv pip compile requirements/cpu-test.in -o requirements/cpu-test.txt --index-strategy unsafe-best-match --torch-backend cpu
+
+RUN --mount=type=cache,target=/root/.cache/uv \
+    uv pip install -r requirements/cpu-test.txt
+
 ######################### DEV IMAGE #########################
 FROM vllm-build AS vllm-dev
 
@@ -97,28 +113,19 @@ RUN --mount=type=cache,target=/root/.cache/uv \
     --mount=type=bind,source=.git,target=.git \
     VLLM_TARGET_DEVICE=cpu python3 setup.py develop
 
+COPY --from=vllm-test-deps /workspace/vllm/requirements/cpu-test.txt requirements/test.txt
+
 RUN --mount=type=cache,target=/root/.cache/uv \
-    --mount=type=bind,src=requirements/test.in,target=requirements/test.in \
-    cp requirements/test.in requirements/test-cpu.in && \
-    sed -i '/mamba_ssm/d' requirements/test-cpu.in && \
-    uv pip compile requirements/test-cpu.in -o requirements/test.txt && \
     uv pip install -r requirements/dev.txt && \
     pre-commit install --hook-type pre-commit --hook-type commit-msg
 
 ENTRYPOINT ["bash"]
 
 ######################### TEST IMAGE #########################
-FROM base AS vllm-test
+FROM vllm-test-deps AS vllm-test
 
 WORKDIR /workspace/
 
-RUN --mount=type=cache,target=/root/.cache/uv \
-    --mount=type=bind,src=requirements/test.in,target=requirements/test.in \
-    cp requirements/test.in requirements/test-cpu.in && \
-    sed -i '/mamba_ssm/d' requirements/test-cpu.in && \
-    uv pip compile requirements/test-cpu.in -o requirements/cpu-test.txt && \
-    uv pip install -r requirements/cpu-test.txt
-
 RUN --mount=type=cache,target=/root/.cache/uv \
     --mount=type=bind,from=vllm-build,src=/workspace/vllm/dist,target=dist \
     uv pip install dist/*.whl
diff --git a/requirements/cpu-build.txt b/requirements/cpu-build.txt
new file mode 100644
index 000000000000..12f2e86da3f9
--- /dev/null
+++ b/requirements/cpu-build.txt
@@ -0,0 +1,12 @@
+# Temporarily used for x86 CPU backend to avoid performance regression of torch>=2.6.0+cpu,
+# see https://github.com/pytorch/pytorch/pull/151218
+cmake>=3.26.1
+ninja
+packaging>=24.2
+setuptools>=77.0.3,<80.0.0
+setuptools-scm>=8
+--extra-index-url https://download.pytorch.org/whl/cpu
+torch==2.6.0+cpu
+wheel
+jinja2>=3.1.6
+regex
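
[Note on the new build pin: requirements/cpu-build.txt locks the x86 build environment to torch==2.6.0+cpu from the PyTorch CPU wheel index, and the Dockerfile above bind-mounts it over requirements/build.txt. As a rough illustration only, an installed environment can be sanity-checked from Python before compiling the C++ extension; the check_cpu_torch_pin helper below is hypothetical and not part of this patch.]

    import torch
    from packaging.version import Version  # available via the pinned packaging>=24.2

    def check_cpu_torch_pin(expected: str = "2.6.0") -> None:
        # Hypothetical guard: fail fast if the build env drifted off the pin.
        base = Version(torch.__version__.split("+")[0])
        if base != Version(expected):
            raise RuntimeError(
                f"x86 CPU build expects torch=={expected}+cpu, got {torch.__version__}")

    check_cpu_torch_pin()
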
diff --git a/requirements/cpu.txt b/requirements/cpu.txt
index 8742898cff00..7b6261fdee8b 100644
--- a/requirements/cpu.txt
+++ b/requirements/cpu.txt
@@ -8,7 +8,7 @@ numba == 0.61.2; python_version > '3.9'
 packaging>=24.2
 setuptools>=77.0.3,<80.0.0
 --extra-index-url https://download.pytorch.org/whl/cpu
-torch==2.7.0+cpu; platform_machine == "x86_64"
+torch==2.6.0+cpu; platform_machine == "x86_64" # torch>=2.6.0+cpu has performance regression on x86 platform, see https://github.com/pytorch/pytorch/pull/151218
 torch==2.7.0; platform_system == "Darwin"
 torch==2.7.0; platform_machine == "ppc64le" or platform_machine == "aarch64"
 
@@ -23,6 +23,7 @@ datasets # for benchmark scripts
 
 # Intel Extension for PyTorch, only for x86_64 CPUs
 intel-openmp==2024.2.1; platform_machine == "x86_64"
-intel_extension_for_pytorch==2.7.0; platform_machine == "x86_64"
+intel_extension_for_pytorch==2.6.0; platform_machine == "x86_64" # torch>=2.6.0+cpu has performance regression on x86 platform, see https://github.com/pytorch/pytorch/pull/151218
 py-libnuma; platform_system != "Darwin"
 psutil; platform_system != "Darwin"
+triton==3.2.0; platform_machine == "x86_64" # Triton is required for torch 2.6+cpu, as it is imported in torch.compile.
diff --git a/tests/models/multimodal/generation/test_common.py b/tests/models/multimodal/generation/test_common.py
index 496850b19af4..e6179619d26d 100644
--- a/tests/models/multimodal/generation/test_common.py
+++ b/tests/models/multimodal/generation/test_common.py
@@ -107,6 +107,7 @@
             ),
             limit_mm_per_prompt={"image": 4},
         )],
+        dtype="bfloat16" if current_platform.is_cpu() else "auto",
         marks=[pytest.mark.core_model, pytest.mark.cpu_model],
     ),
     "paligemma": VLMTestInfo(
diff --git a/tests/models/multimodal/generation/vlm_utils/builders.py b/tests/models/multimodal/generation/vlm_utils/builders.py
index 7d20dd66089b..03c08240d6a8 100644
--- a/tests/models/multimodal/generation/vlm_utils/builders.py
+++ b/tests/models/multimodal/generation/vlm_utils/builders.py
@@ -203,6 +203,9 @@ def build_embedding_inputs_from_test_info(
 
     images = [asset.pil_image for asset in image_assets]
     embeds = test_info.convert_assets_to_embeddings(image_assets)
+    if test_info.dtype != "auto":
+        dtype = getattr(torch, test_info.dtype)  # type: ignore
+        embeds = [e.to(dtype=dtype) for e in embeds]
 
     assert len(images) == len(model_prompts)
     inputs = build_single_image_inputs(images, model_prompts, size_wrapper)
diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py
index c1bae033c2b4..133881fd0499 100644
--- a/vllm/model_executor/layers/fused_moe/layer.py
+++ b/vllm/model_executor/layers/fused_moe/layer.py
@@ -54,6 +54,8 @@
 if is_rocm_aiter_moe_enabled():
     from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import (  # noqa: E501
         rocm_aiter_grouped_topk as grouped_topk)
+elif current_platform.is_cpu():
+    pass
 else:
     from vllm.model_executor.layers.fused_moe.fused_moe import grouped_topk
 if current_platform.is_tpu():
diff --git a/vllm/model_executor/layers/quantization/ipex_quant.py b/vllm/model_executor/layers/quantization/ipex_quant.py
index 31ad96eccaf3..8ceab149bd51 100644
--- a/vllm/model_executor/layers/quantization/ipex_quant.py
+++ b/vllm/model_executor/layers/quantization/ipex_quant.py
@@ -15,7 +15,7 @@
 from vllm.model_executor.layers.quantization.gptq import GPTQLinearMethod
 from vllm.platforms import current_platform
 
-MIN_IPEX_VERSION = "2.7.0"
+MIN_IPEX_VERSION = "2.4.0"
 
 
 class IPEXConfig(QuantizationConfig):
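
[Note on the embedding cast in patch 1: builders.py resolves the test's dtype string to a torch.dtype with getattr(torch, ...) and casts the precomputed image embeddings so they match bfloat16 model weights on CPU. A minimal standalone sketch of that pattern; the resolve_dtype helper and the random tensors are illustrative, not vLLM code.]

    import torch

    def resolve_dtype(name: str) -> torch.dtype:
        # "bfloat16" -> torch.bfloat16; reject strings that are not dtype names.
        dtype = getattr(torch, name, None)
        if not isinstance(dtype, torch.dtype):
            raise ValueError(f"not a torch dtype: {name!r}")
        return dtype

    embeds = [torch.randn(4, 8) for _ in range(2)]  # stand-ins for image embeddings
    embeds = [e.to(dtype=resolve_dtype("bfloat16")) for e in embeds]
    assert all(e.dtype is torch.bfloat16 for e in embeds)
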
From f3c41cfdc80b6b645454c6664c5e8a3a7a07b8da Mon Sep 17 00:00:00 2001
From: "jiang1.li"
Date: Thu, 26 Jun 2025 05:26:57 +0000
Subject: [PATCH 2/3] fix

Signed-off-by: jiang1.li
---
 tests/models/multimodal/generation/test_common.py     | 1 +
 vllm/model_executor/layers/quantization/ipex_quant.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/models/multimodal/generation/test_common.py b/tests/models/multimodal/generation/test_common.py
index e6179619d26d..9d63339737ce 100644
--- a/tests/models/multimodal/generation/test_common.py
+++ b/tests/models/multimodal/generation/test_common.py
@@ -107,6 +107,7 @@
             ),
             limit_mm_per_prompt={"image": 4},
         )],
+        # TODO: Revert to "auto" when CPU backend can use torch > 2.6
         dtype="bfloat16" if current_platform.is_cpu() else "auto",
         marks=[pytest.mark.core_model, pytest.mark.cpu_model],
     ),
diff --git a/vllm/model_executor/layers/quantization/ipex_quant.py b/vllm/model_executor/layers/quantization/ipex_quant.py
index 8ceab149bd51..428e9b882bca 100644
--- a/vllm/model_executor/layers/quantization/ipex_quant.py
+++ b/vllm/model_executor/layers/quantization/ipex_quant.py
@@ -15,7 +15,7 @@
 from vllm.model_executor.layers.quantization.gptq import GPTQLinearMethod
 from vllm.platforms import current_platform
 
-MIN_IPEX_VERSION = "2.4.0"
+MIN_IPEX_VERSION = "2.6.0"
 
 
 class IPEXConfig(QuantizationConfig):

From 687f84dbd150f5aef55505b4e445ddb96db228c2 Mon Sep 17 00:00:00 2001
From: "jiang1.li"
Date: Thu, 26 Jun 2025 07:23:13 +0000
Subject: [PATCH 3/3] fix

Signed-off-by: jiang1.li
---
 requirements/cpu-build.txt | 2 +-
 requirements/cpu.txt       | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/requirements/cpu-build.txt b/requirements/cpu-build.txt
index 12f2e86da3f9..37f072202bd7 100644
--- a/requirements/cpu-build.txt
+++ b/requirements/cpu-build.txt
@@ -1,4 +1,4 @@
-# Temporarily used for x86 CPU backend to avoid performance regression of torch>=2.6.0+cpu,
+# Temporarily used for x86 CPU backend to avoid performance regression of torch>2.6.0+cpu,
 # see https://github.com/pytorch/pytorch/pull/151218
 cmake>=3.26.1
 ninja
diff --git a/requirements/cpu.txt b/requirements/cpu.txt
index 7b6261fdee8b..df3a3393563a 100644
--- a/requirements/cpu.txt
+++ b/requirements/cpu.txt
@@ -8,7 +8,7 @@ numba == 0.61.2; python_version > '3.9'
 packaging>=24.2
 setuptools>=77.0.3,<80.0.0
 --extra-index-url https://download.pytorch.org/whl/cpu
-torch==2.6.0+cpu; platform_machine == "x86_64" # torch>=2.6.0+cpu has performance regression on x86 platform, see https://github.com/pytorch/pytorch/pull/151218
+torch==2.6.0+cpu; platform_machine == "x86_64" # torch>2.6.0+cpu has performance regression on x86 platform, see https://github.com/pytorch/pytorch/pull/151218
 torch==2.7.0; platform_system == "Darwin"
 torch==2.7.0; platform_machine == "ppc64le" or platform_machine == "aarch64"
 
@@ -23,7 +23,7 @@ datasets # for benchmark scripts
 
 # Intel Extension for PyTorch, only for x86_64 CPUs
 intel-openmp==2024.2.1; platform_machine == "x86_64"
-intel_extension_for_pytorch==2.6.0; platform_machine == "x86_64" # torch>=2.6.0+cpu has performance regression on x86 platform, see https://github.com/pytorch/pytorch/pull/151218
+intel_extension_for_pytorch==2.6.0; platform_machine == "x86_64" # torch>2.6.0+cpu has performance regression on x86 platform, see https://github.com/pytorch/pytorch/pull/151218
 py-libnuma; platform_system != "Darwin"
 psutil; platform_system != "Darwin"
 triton==3.2.0; platform_machine == "x86_64" # Triton is required for torch 2.6+cpu, as it is imported in torch.compile.
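
[Note on the IPEX floor: patch 2 settles MIN_IPEX_VERSION at "2.6.0" so the quantization path matches the pinned torch 2.6 stack, correcting the interim "2.4.0" from patch 1. A sketch of how such a version gate is typically enforced; the require_ipex helper below is illustrative and not vLLM's exact code.]

    from packaging.version import Version

    MIN_IPEX_VERSION = "2.6.0"  # must track the torch build the CPU backend pins

    def require_ipex(min_version: str = MIN_IPEX_VERSION):
        # Hypothetical gate: import IPEX only if it satisfies the version floor.
        try:
            import intel_extension_for_pytorch as ipex
        except ImportError as e:
            raise ImportError(
                "IPEX quantization needs intel_extension_for_pytorch") from e
        if Version(ipex.__version__) < Version(min_version):
            raise ImportError(
                f"intel_extension_for_pytorch>={min_version} required, "
                f"found {ipex.__version__}")
        return ipex
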