Commit: debug
mengfei25 committed Jun 19, 2024
1 parent 5091d4b commit 5a3b56b
Showing 1 changed file with 0 additions and 156 deletions: .github/workflows/inductor_xpu_e2e_nightly.yml
@@ -46,162 +46,6 @@ jobs:
    steps:
      - name: Checkout torch-xpu-ops
        uses: actions/checkout@v4
      - name: Prepare Conda ENV
        run: |
          which conda
          if ! conda env list | grep -q "^e2e_ci "; then
            conda create -n e2e_ci python=3.8 cmake ninja -y
          fi
          source activate e2e_ci
          conda install intel::mkl-static intel::mkl-include -y
          pip install pandas scipy tqdm
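      # Illustrative sketch (not part of the original workflow): a quick sanity check of
      # the freshly created env could look like the line below; the import list simply
      # mirrors the packages installed by the step above.
      #   conda run -n e2e_ci python -c "import pandas, scipy, tqdm; print('e2e_ci ready')"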
      - name: Prepare Stock Pytorch
        run: |
          pwd
          cd ../ && rm -rf pytorch
          source activate e2e_ci
          git clone -b main https://github.com/pytorch/pytorch pytorch
          cd pytorch
          # apply PRs for stock pytorch
          pip install requests
          python ../torch-xpu-ops/.github/scripts/apply_torch_pr.py
          git status && git show -s
          git submodule sync && git submodule update --init --recursive
          rm -rf third_party/torch-xpu-ops && cp -r ../torch-xpu-ops third_party/
          # Workaround for torch-xpu-ops ci test
          sed -i "s/checkout --quiet \${TORCH_XPU_OPS_COMMIT}/log -n 1/g" caffe2/CMakeLists.txt
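      # Illustrative note (an interpretation, not text from the original workflow): the sed
      # above patches the torch-xpu-ops pin in caffe2/CMakeLists.txt so that the locally
      # copied third_party/torch-xpu-ops is used as-is. Assuming the pinned command has the
      # form below, the substitution roughly turns
      #   git checkout --quiet ${TORCH_XPU_OPS_COMMIT}
      # into
      #   git log -n 1
      # which leaves the working tree untouched instead of checking out the pinned commit.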
      - name: Identify pinned versions
        run: |
          cd ../pytorch
          if [ -z ${{ inputs.triton }} ]; then
            echo "TRITON_COMMIT_ID=$(<.ci/docker/ci_commit_pins/triton-xpu.txt)" >> "${GITHUB_ENV}"
          else
            echo "TRITON_COMMIT_ID=${{ inputs.triton }}" >> "${GITHUB_ENV}"
          fi
          echo "TORCH_BRANCH_ID=$(git rev-parse --abbrev-ref HEAD)" >> "${GITHUB_ENV}"
          echo "TORCH_COMMIT_ID=$(git rev-parse HEAD)" >> "${GITHUB_ENV}"
          echo "TORCHBENCH_COMMIT_ID=$(<third_party/torch-xpu-ops/.github/ci_commit_pins/torchbench.txt)" >> "${GITHUB_ENV}"
          echo "TORCHVISION_COMMIT_ID=$(<.github/ci_commit_pins/vision.txt)" >> "${GITHUB_ENV}"
          echo "TORCHTEXT_COMMIT_ID=$(<.github/ci_commit_pins/text.txt)" >> "${GITHUB_ENV}"
          echo "TORCHAUDIO_COMMIT_ID=$(<.github/ci_commit_pins/audio.txt)" >> "${GITHUB_ENV}"
          echo "TRANSFORMERS_VERSION=$(<.ci/docker/ci_commit_pins/huggingface.txt)" >> "${GITHUB_ENV}"
          echo "TIMM_COMMIT_ID=$(<.ci/docker/ci_commit_pins/timm.txt)" >> "${GITHUB_ENV}"
          echo ${GITHUB_ENV}
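      # Illustrative note (not part of the original workflow): GITHUB_ENV is the path of the
      # per-job environment file, so the final echo prints that path rather than the pins
      # written above. To dump the collected KEY=value pairs, something like
      #   cat "${GITHUB_ENV}"
      # could be used instead.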
      - name: Triton Installation
        run: |
          source activate e2e_ci
          cd ../pytorch
          TRITON_REPO="https://github.com/intel/intel-xpu-backend-for-triton"
          echo ${TRITON_REPO}@${TRITON_COMMIT_ID}
          pip install --force-reinstall "git+${TRITON_REPO}@${TRITON_COMMIT_ID}#subdirectory=python"
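      # Illustrative sketch (an assumption, not part of the original workflow): the Triton
      # install could be smoke-tested with a plain import check, e.g.
      #   python -c "import triton; print(triton.__version__)"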
      - name: Build Pytorch XPU
        run: |
          source activate e2e_ci
          cd ../pytorch
          pip install -r requirements.txt
          export USE_XPU=1
          source /opt/intel/oneapi/compiler/latest/env/vars.sh
          echo "DKMS_VERSION=$(dkms status 2>&1 |grep 'intel-i915-dkms' |sed 's/.*\///;s/,.*//')" >> "${GITHUB_ENV}"
          echo "DPCPP_VERSION=$(dpcpp --version 2>&1 |grep 'DPC++/C++' |sed 's/.*(//;s/).*//')" >> "${GITHUB_ENV}"
          export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
          python setup.py bdist_wheel
          pip install --force-reinstall dist/*.whl
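      # Illustrative sketch (an assumption, not part of the original workflow): the freshly
      # installed wheel could be verified on an XPU machine with
      #   python -c "import torch; print(torch.__version__, torch.xpu.is_available())"
      # where torch.xpu.is_available() is assumed to be present in this XPU-enabled build.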
      - name: Show GITHUB_ENV
        run: echo "$GITHUB_ENV"
      - name: Nightly Huggingface FP32 Inference Accuracy Test
        if: ${{ !inputs.suite }}
        uses: ./.github/actions/inductor-xpu-e2e-test
        with:
          suite: huggingface
          env_prepare: true
          dt: float32
          mode: inference
          scenario: accuracy
          expected_pass_num: 46
          hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
      - name: Nightly Huggingface BF16 Inference Accuracy Test
        if: ${{ !inputs.suite }}
        uses: ./.github/actions/inductor-xpu-e2e-test
        with:
          suite: huggingface
          dt: bfloat16
          mode: inference
          scenario: accuracy
          expected_pass_num: 46
          hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
      - name: Nightly Huggingface FP16 Inference Accuracy Test
        if: ${{ !inputs.suite }}
        uses: ./.github/actions/inductor-xpu-e2e-test
        with:
          suite: huggingface
          dt: float16
          mode: inference
          scenario: accuracy
          expected_pass_num: 46
          hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
      - name: Nightly Huggingface FP32 Training Accuracy Test
        if: ${{ !inputs.suite }}
        uses: ./.github/actions/inductor-xpu-e2e-test
        with:
          suite: huggingface
          env_prepare: true
          dt: float32
          mode: training
          scenario: accuracy
          expected_pass_num: 46
          hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
      - name: Nightly Huggingface BF16 Training Accuracy Test
        if: ${{ !inputs.suite }}
        uses: ./.github/actions/inductor-xpu-e2e-test
        with:
          suite: huggingface
          dt: bfloat16
          mode: training
          scenario: accuracy
          expected_pass_num: 46
          hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
      - name: Nightly Huggingface FP16 Training Accuracy Test
        if: ${{ !inputs.suite }}
        uses: ./.github/actions/inductor-xpu-e2e-test
        with:
          suite: huggingface
          dt: float16
          mode: training
          scenario: accuracy
          expected_pass_num: 46
          hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
      - name: Nightly Torchbench BF16 Training Accuracy Test
        if: ${{ !inputs.suite }}
        uses: ./.github/actions/inductor-xpu-e2e-test
        with:
          suite: torchbench
          dt: bfloat16
          mode: training
          scenario: accuracy
          expected_pass_num: 39
          env_prepare: true
          hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
      - name: Nightly Timm_models FP16 Training Accuracy Test
        if: ${{ !inputs.suite }}
        uses: ./.github/actions/inductor-xpu-e2e-test
        with:
          suite: timm_models
          dt: float16
          mode: training
          scenario: accuracy
          expected_pass_num: 22
          env_prepare: true
          hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
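      # Note (added for clarity, not part of the original workflow): the nightly steps above
      # are gated on ${{ !inputs.suite }}, so they run only when no suite input is supplied
      # (e.g. scheduled runs); the OnDemand step below covers explicitly requested suites.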
      - name: OnDemand Test (${{ inputs.suite }} ${{ inputs.dt }} ${{ inputs.mode }} ${{ inputs.scenario }})
        if: ${{ inputs.suite }}
        uses: ./.github/actions/inductor-xpu-e2e-test
        with:
          suite: ${{ inputs.suite }}
          env_prepare: true
          dt: ${{ inputs.dt }}
          mode: ${{ inputs.mode }}
          scenario: ${{ inputs.scenario }}
          hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
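      # Illustrative sketch (an assumption, not part of the original workflow): if suite, dt,
      # mode and scenario are exposed as workflow_dispatch inputs, an on-demand run could be
      # triggered from the GitHub CLI along these lines:
      #   gh workflow run inductor_xpu_e2e_nightly.yml \
      #     -f suite=huggingface -f dt=float32 -f mode=inference -f scenario=accuracy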

  Tests-Failure-And-Report:
    if: always()