diff --git a/.github/workflows/image_openeuler.yml b/.github/workflows/image_openeuler.yml index da94cac8f3..f6f7cea333 100644 --- a/.github/workflows/image_openeuler.yml +++ b/.github/workflows/image_openeuler.yml @@ -88,6 +88,8 @@ jobs: uses: docker/build-push-action@v6 with: platforms: linux/amd64,linux/arm64 + # use the current repo path as the build context, ensuring .git is included + context: . # only trigger when tag, branch/main push push: ${{ github.event_name == 'push' && github.repository_owner == 'vllm-project' }} labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/image.yml b/.github/workflows/image_ubuntu.yml similarity index 88% rename from .github/workflows/image.yml rename to .github/workflows/image_ubuntu.yml index ba9fecad39..5cdf076839 100644 --- a/.github/workflows/image.yml +++ b/.github/workflows/image_ubuntu.yml @@ -16,7 +16,7 @@ on: - 'main' - '*-dev' paths: - - '.github/workflows/image.yml' + - '.github/workflows/image_ubuntu.yml' - 'Dockerfile' - 'vllm_ascend/**' push: @@ -27,13 +27,13 @@ on: tags: - 'v*' paths: - - '.github/workflows/image.yml' + - '.github/workflows/image_ubuntu.yml' - 'Dockerfile' - 'vllm_ascend/**' jobs: build: - name: vllm-ascend image + name: vllm-ascend Ubuntu image runs-on: ubuntu-latest steps: @@ -72,9 +72,6 @@ jobs: - name: Build - Set up QEMU uses: docker/setup-qemu-action@v3 - # TODO(yikun): remove this after https://github.com/docker/setup-qemu-action/issues/198 resolved - with: - image: tonistiigi/binfmt:qemu-v7.0.0-28 - name: Build - Set up Docker Buildx uses: docker/setup-buildx-action@v3 @@ -91,12 +88,11 @@ jobs: uses: docker/build-push-action@v6 with: platforms: linux/amd64,linux/arm64 - cache-from: type=gha - cache-to: type=gha,mode=max + # use the current repo path as the build context, ensuring .git is included context: . 
# only trigger when tag, branch/main push push: ${{ github.event_name == 'push' && github.repository_owner == 'vllm-project' }} labels: ${{ steps.meta.outputs.labels }} tags: ${{ steps.meta.outputs.tags }} build-args: | - PIP_INDEX_URL=https://pypi.org/simple + PIP_INDEX_URL=https://pypi.org/simple diff --git a/Dockerfile b/Dockerfile index fd53b1f1af..dbfff4f512 100644 --- a/Dockerfile +++ b/Dockerfile @@ -29,19 +29,19 @@ RUN apt-get update -y && \ WORKDIR /workspace -COPY . /workspace/vllm-ascend/ +COPY . /vllm-workspace/vllm-ascend/ RUN pip config set global.index-url ${PIP_INDEX_URL} # Install vLLM ARG VLLM_REPO=https://github.com/vllm-project/vllm.git ARG VLLM_TAG=v0.7.3 -RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /workspace/vllm -RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \ +RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm +RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /vllm-workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \ python3 -m pip cache purge # Install vllm-ascend -RUN python3 -m pip install -e /workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ +RUN python3 -m pip install -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ python3 -m pip cache purge # Install modelscope (for fast download) and ray (for multinode) diff --git a/Dockerfile.openEuler b/Dockerfile.openEuler index 0f9403cf7f..0365969745 100644 --- a/Dockerfile.openEuler +++ b/Dockerfile.openEuler @@ -25,19 +25,19 @@ RUN yum update -y && \ WORKDIR /workspace -COPY . /workspace/vllm-ascend/ +COPY . 
/vllm-workspace/vllm-ascend/ RUN pip config set global.index-url ${PIP_INDEX_URL} # Install vLLM ARG VLLM_REPO=https://github.com/vllm-project/vllm.git ARG VLLM_TAG=v0.7.3 -RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /workspace/vllm -RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \ +RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm +RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /vllm-workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \ python3 -m pip cache purge # Install vllm-ascend -RUN python3 -m pip install -e /workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ +RUN python3 -m pip install -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \ python3 -m pip cache purge # Install modelscope (for fast download) and ray (for multinode)