Workflow fixes (#11270)
LysandreJik authored Apr 16, 2021
1 parent dfc6dd8 commit 5254220
Showing 3 changed files with 46 additions and 2 deletions.
16 changes: 16 additions & 0 deletions .github/workflows/self-push.yml
@@ -38,6 +38,7 @@ jobs:
apt -y update && apt install -y libsndfile1-dev
pip install --upgrade pip
pip install .[sklearn,testing,onnxruntime,sentencepiece,speech]
pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cu111.html
- name: Are GPUs recognized by our DL frameworks
run: |
@@ -121,6 +122,7 @@ jobs:
apt -y update && apt install -y libsndfile1-dev
pip install --upgrade pip
pip install .[sklearn,testing,onnxruntime,sentencepiece,speech]
pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cu111.html
- name: Are GPUs recognized by our DL frameworks
run: |
@@ -220,6 +222,13 @@ jobs:
if: ${{ always() }}
run: cat reports/tests_torch_cuda_extensions_gpu_failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_tests_torch_cuda_extensions_gpu_test_reports
path: reports

run_tests_torch_cuda_extensions_multi_gpu:
runs-on: [self-hosted, docker-gpu, multi-gpu]
container:
@@ -253,6 +262,13 @@ jobs:
if: ${{ always() }}
run: cat reports/tests_torch_cuda_extensions_multi_gpu_failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_tests_torch_cuda_extensions_multi_gpu_test_reports
path: reports


send_results:
name: Send results to webhook
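Both self-push jobs above now pull torch-scatter from the PyTorch Geometric wheel index: the -f find-links URL points pip at wheels prebuilt for torch 1.8.0 with CUDA 11.1, so it has to match the PyTorch build inside the CI container (the same line is added to the scheduled workflow below). A minimal sanity-check sketch of that version match, an assumption rather than part of the workflow:

# Sketch (not part of the repository or the workflow): verify the container's
# torch build matches the +cu111 wheel index used for torch-scatter above.
import torch
import torch_scatter  # the import itself fails if the wheel ABI does not match

print(torch.__version__)          # expected to start with "1.8.0"
print(torch.version.cuda)         # expected "11.1", matching the cu111 wheels
print(torch_scatter.__version__)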
20 changes: 18 additions & 2 deletions .github/workflows/self-scheduled.yml
@@ -33,7 +33,8 @@ jobs:
run: |
apt -y update && apt install -y libsndfile1-dev
pip install --upgrade pip
pip install .[sklearn,testing,onnxruntime,sentencepiece,speech,deepspeed]
pip install .[sklearn,testing,onnxruntime,sentencepiece,speech]
pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cu111.html
- name: Are GPUs recognized by our DL frameworks
run: |
@@ -155,7 +156,8 @@ jobs:
run: |
apt -y update && apt install -y libsndfile1-dev
pip install --upgrade pip
pip install .[sklearn,testing,onnxruntime,sentencepiece,speech,deepspeed,fairscale]
pip install .[sklearn,testing,onnxruntime,sentencepiece,speech]
pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cu111.html
- name: Are GPUs recognized by our DL frameworks
run: |
@@ -279,6 +281,13 @@ jobs:
if: ${{ always() }}
run: cat reports/tests_torch_cuda_extensions_gpu_failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_tests_torch_cuda_extensions_gpu_test_reports
path: reports

run_all_tests_torch_cuda_extensions_multi_gpu:
runs-on: [self-hosted, docker-gpu, multi-gpu]
container:
@@ -312,6 +321,13 @@ jobs:
if: ${{ always() }}
run: cat reports/tests_torch_cuda_extensions_multi_gpu_failures_short.txt

- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_tests_torch_cuda_extensions_multi_gpu_test_reports
path: reports

send_results:
name: Send results to webhook
runs-on: ubuntu-latest
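As in self-push.yml, the new "Test suite reports artifacts" steps upload the reports directory under a job-specific artifact name, and that name doubles as the directory prefix the notification script reads from, so the workflow and utils/notification_service.py (changed below) have to stay in sync. A small consistency sketch using only values that appear in this commit:

# Consistency sketch (not part of the repository): the upload-artifact "name"
# values above are assumed to become the top-level directories that the report
# templates added to utils/notification_service.py start with.
artifact_names = {
    "run_tests_torch_cuda_extensions_gpu_test_reports",
    "run_tests_torch_cuda_extensions_multi_gpu_test_reports",
}
report_templates = [
    "run_tests_torch_cuda_extensions_gpu_test_reports/tests_torch_cuda_extensions_gpu_[].txt",
    "run_tests_torch_cuda_extensions_multi_gpu_test_reports/tests_torch_cuda_extensions_multi_gpu_[].txt",
]
assert all(t.split("/", 1)[0] in artifact_names for t in report_templates)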
12 changes: 12 additions & 0 deletions utils/notification_service.py
@@ -128,13 +128,25 @@ def format_for_slack(total_results, results, scheduled: bool):
"common": "run_all_tests_torch_multi_gpu_test_reports/tests_torch_multi_gpu_[].txt",
"pipeline": "run_all_tests_torch_multi_gpu_test_reports/tests_torch_pipeline_multi_gpu_[].txt",
},
"Torch Cuda Extensions Single GPU": {
"common": "run_tests_torch_cuda_extensions_gpu_test_reports/tests_torch_cuda_extensions_gpu_[].txt"
},
"Torch Cuda Extensions Multi GPU": {
"common": "run_tests_torch_cuda_extensions_multi_gpu_test_reports/tests_torch_cuda_extensions_multi_gpu_[].txt"
},
}
else:
file_paths = {
"TF Single GPU": {"common": "run_all_tests_tf_gpu_test_reports/tests_tf_gpu_[].txt"},
"Torch Single GPU": {"common": "run_all_tests_torch_gpu_test_reports/tests_torch_gpu_[].txt"},
"TF Multi GPU": {"common": "run_all_tests_tf_multi_gpu_test_reports/tests_tf_multi_gpu_[].txt"},
"Torch Multi GPU": {"common": "run_all_tests_torch_multi_gpu_test_reports/tests_torch_multi_gpu_[].txt"},
"Torch Cuda Extensions Single GPU": {
"common": "run_tests_torch_cuda_extensions_gpu_test_reports/tests_torch_cuda_extensions_gpu_[].txt"
},
"Torch Cuda Extensions Multi GPU": {
"common": "run_tests_torch_cuda_extensions_multi_gpu_test_reports/tests_torch_cuda_extensions_multi_gpu_[].txt"
},
}

client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
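The two new dictionary entries mirror the artifact names uploaded by the workflows above, for both the scheduled and the push report layouts. A minimal sketch of how such a mapping could be consumed, assuming the surrounding code fills the "[]" placeholder with a report suffix such as "failures_short" (the suffix matches the *_failures_short.txt files printed by the workflow steps above; the helper itself is hypothetical, not the repository's actual code):

import os


def collect_reports(file_paths, suffix="failures_short"):
    # Hypothetical helper: resolve each "[]" template and read the report if
    # the corresponding job produced it; missing files are simply skipped.
    reports = {}
    for job, templates in file_paths.items():
        for kind, template in templates.items():
            path = template.replace("[]", suffix)
            if os.path.isfile(path):
                with open(path) as f:
                    reports[(job, kind)] = f.read()
    return reports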
