From 83994986862d0f8999c9816da2ba084e7e0b7775 Mon Sep 17 00:00:00 2001 From: v-chen_data Date: Wed, 20 Nov 2024 11:38:24 -0500 Subject: [PATCH] bump torch --- .github/workflows/docker.yaml | 8 ++++---- .github/workflows/pr-cpu.yaml | 4 ++-- .github/workflows/pr-gpu.yaml | 12 ++++++------ .github/workflows/release.yaml | 4 ++-- README.md | 12 ++++++------ setup.py | 2 +- 6 files changed, 21 insertions(+), 21 deletions(-) diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml index 15cf29f95a..39043ef92a 100644 --- a/.github/workflows/docker.yaml +++ b/.github/workflows/docker.yaml @@ -17,11 +17,11 @@ jobs: strategy: matrix: include: - - name: "2.4.0_cu124" - base_image: mosaicml/pytorch:2.4.0_cu124-python3.11-ubuntu22.04 + - name: "2.5.1_cu124" + base_image: mosaicml/pytorch:2.5.1_cu124-python3.11-ubuntu22.04 dep_groups: "[all]" - - name: "2.4.0_cu124_aws" - base_image: mosaicml/pytorch:2.4.0_cu124-python3.11-ubuntu22.04-aws + - name: "2.5.1_cu124_aws" + base_image: mosaicml/pytorch:2.5.1_cu124-python3.11-ubuntu22.04-aws dep_groups: "[all]" steps: diff --git a/.github/workflows/pr-cpu.yaml b/.github/workflows/pr-cpu.yaml index 053ab221db..c500df9cd7 100644 --- a/.github/workflows/pr-cpu.yaml +++ b/.github/workflows/pr-cpu.yaml @@ -21,9 +21,9 @@ jobs: strategy: matrix: include: - - name: "cpu-2.4.0" + - name: "cpu-2.5.1" pip_deps: "[all-cpu]" - container: mosaicml/pytorch:2.4.0_cpu-python3.11-ubuntu22.04 + container: mosaicml/pytorch:2.5.1_cpu-python3.11-ubuntu22.04 markers: "not gpu" pytest_command: "coverage run -m pytest" steps: diff --git a/.github/workflows/pr-gpu.yaml b/.github/workflows/pr-gpu.yaml index 5b91d54442..cdfc4b7b64 100644 --- a/.github/workflows/pr-gpu.yaml +++ b/.github/workflows/pr-gpu.yaml @@ -22,8 +22,8 @@ jobs: fail-fast: false matrix: include: - - name: "gpu-2.4.0-1" - container: mosaicml/llm-foundry:2.4.0_cu124-latest + - name: "gpu-2.5.1-1" + container: mosaicml/llm-foundry:2.5.1_cu124-latest markers: "gpu" pip_deps: 
"[all]" pytest_command: "coverage run -m pytest" @@ -51,8 +51,8 @@ jobs: fail-fast: false matrix: include: - - name: "gpu-2.4.0-2" - container: mosaicml/llm-foundry:2.4.0_cu124-latest + - name: "gpu-2.5.1-2" + container: mosaicml/llm-foundry:2.5.1_cu124-latest markers: "gpu" pip_deps: "[all]" pytest_command: "coverage run -m pytest" @@ -80,8 +80,8 @@ jobs: fail-fast: false matrix: include: - - name: "gpu-2.4.0-4" - container: mosaicml/llm-foundry:2.4.0_cu124-latest + - name: "gpu-2.5.1-4" + container: mosaicml/llm-foundry:2.5.1_cu124-latest markers: "gpu" pip_deps: "[all]" pytest_command: "coverage run -m pytest" diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 6ad7593b38..19bb050eb7 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -93,7 +93,7 @@ jobs: ${{ env.AWS_DOCKER_TAG }} ${{ env.AWS_LATEST_TAG }} build-args: | - BASE_IMAGE=mosaicml/pytorch:2.4.0_cu124-python3.11-ubuntu22.04-aws + BASE_IMAGE=mosaicml/pytorch:2.5.1_cu124-python3.11-ubuntu22.04-aws BRANCH_NAME=${{ env.BRANCH_NAME }} DEP_GROUPS=[all] KEEP_FOUNDRY=true @@ -108,7 +108,7 @@ jobs: ${{ env.DOCKER_TAG }} ${{ env.LATEST_TAG }} build-args: | - BASE_IMAGE=mosaicml/pytorch:2.4.0_cu124-python3.11-ubuntu22.04 + BASE_IMAGE=mosaicml/pytorch:2.5.1_cu124-python3.11-ubuntu22.04 BRANCH_NAME=${{ env.BRANCH_NAME }} DEP_GROUPS=[all] KEEP_FOUNDRY=true diff --git a/README.md b/README.md index 9dd7ccbc00..61a36e646b 100644 --- a/README.md +++ b/README.md @@ -113,8 +113,8 @@ If you have success/failure using LLM Foundry on other systems, please let us kn | Device | Torch Version | Cuda Version | Status | | -------------- | ------------- | ------------ | ---------------------------- | -| A100-40GB/80GB | 2.4.0 | 12.4 | :white_check_mark: Supported | -| H100-80GB | 2.4.0 | 12.4 | :white_check_mark: Supported | +| A100-40GB/80GB | 2.5.1 | 12.4 | :white_check_mark: Supported | +| H100-80GB | 2.5.1 | 12.4 | :white_check_mark: Supported | ## MosaicML Docker 
Images We highly recommend using our prebuilt Docker images. You can find them here: https://hub.docker.com/orgs/mosaicml/repositories. @@ -122,15 +122,15 @@ We highly recommend using our prebuilt Docker images. You can find them here: ht The `mosaicml/pytorch` images are pinned to specific PyTorch and CUDA versions, and are stable and rarely updated. The `mosaicml/llm-foundry` images are built with new tags upon every commit to the `main` branch. -You can select a specific commit hash such as `mosaicml/llm-foundry:2.4.0_cu124-36ab1ba` or take the latest one using `mosaicml/llm-foundry:2.4.0_cu124-latest`. +You can select a specific commit hash such as `mosaicml/llm-foundry:2.5.1_cu124-36ab1ba` or take the latest one using `mosaicml/llm-foundry:2.5.1_cu124-latest`. **Please Note:** The `mosaicml/llm-foundry` images do not come with the `llm-foundry` package preinstalled, just the dependencies. You will still need to `pip install llm-foundry` either from PyPi or from source. | Docker Image | Torch Version | Cuda Version | LLM Foundry dependencies installed? 
| | ------------------------------------------------------ | ------------- | ----------------- | ----------------------------------- | -| `mosaicml/pytorch:2.4.0_cu124-python3.11-ubuntu22.04` | 2.4.0 | 12.4 (Infiniband) | No | -| `mosaicml/llm-foundry:2.4.0_cu124-latest` | 2.4.0 | 12.4 (Infiniband) | Yes | -| `mosaicml/llm-foundry:2.4.0_cu124_aws-latest` | 2.4.0 | 12.4 (EFA) | Yes | +| `mosaicml/pytorch:2.5.1_cu124-python3.11-ubuntu22.04` | 2.5.1 | 12.4 (Infiniband) | No | +| `mosaicml/llm-foundry:2.5.1_cu124-latest` | 2.5.1 | 12.4 (Infiniband) | Yes | +| `mosaicml/llm-foundry:2.5.1_cu124_aws-latest` | 2.5.1 | 12.4 (EFA) | Yes | # Installation diff --git a/setup.py b/setup.py index 566e6aae9c..11d01ae6b4 100644 --- a/setup.py +++ b/setup.py @@ -57,7 +57,7 @@ 'accelerate>=0.25,<1.2', # for HF inference `device_map` 'transformers>=4.43.2,<4.47', 'mosaicml-streaming>=0.9.0,<0.10', - 'torch>=2.4.0,<2.4.1', + 'torch>=2.5.1,<2.5.2', 'datasets>=2.20.0,<2.21', 'fsspec==2023.6.0', # newer version results in a bug in datasets that duplicates data 'sentencepiece==0.2.0',