[Refactor] Cleanup deprecation of empty td filtering in apply #819

Workflow file for this run

name: Continuous Benchmark
on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - "*"
  workflow_dispatch:
permissions:
  deployments: write
  contents: write
concurrency:
  # Documentation suggests ${{ github.head_ref }}, but that is only available on pull_request/pull_request_target triggers, so we use ${{ github.ref }}.
  # On main, we want all builds to complete even if merging happens faster, to make it easier to discover at which point something broke.
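  # Net effect: pushes to main key the group on the commit SHA, so runs on main are never cancelled;
  # every other ref shares one group per ref, so a newer push to a PR cancels its in-flight run.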
  group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && format('ci-master-{0}', github.sha) || format('ci-{0}', github.ref) }}
  cancel-in-progress: true
jobs:
  benchmark_cpu:
    name: CPU Pytest benchmark
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: 3.8
      - name: Setup Environment
        run: |
          pip install -e .
          pip install pytest pytest-benchmark
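      # pytest-benchmark collects the timings and, via --benchmark-json, writes them to
      # benchmarks/output.json for the publishing step below.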
      - name: Run benchmarks
        run: |
          cd benchmarks/
          python -m pytest -vvv --rank 0 --benchmark-json output.json
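      # github-action-benchmark compares each run against the history stored on gh-pages and, when a
      # benchmark regresses beyond the 200% threshold, fails the job and leaves a comment cc'ing @vmoens.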
      - name: Store benchmark results
        uses: benchmark-action/github-action-benchmark@v1
        if: ${{ github.ref == 'refs/heads/main' || github.event_name == 'workflow_dispatch' }}
        with:
          name: CPU Benchmark Results
          tool: 'pytest'
          output-file-path: benchmarks/output.json
          fail-on-alert: true
          alert-threshold: '200%'
          alert-comment-cc-users: '@vmoens'
          comment-on-alert: true
          github-token: ${{ secrets.GITHUB_TOKEN }}
          gh-pages-branch: gh-pages
          auto-push: true
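  # The GPU job runs the same benchmark suite, but inside a minimal CUDA 12.3 container on a GPU
  # runner, so conda, a build toolchain, and a nightly CUDA build of PyTorch are installed by hand below.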
  benchmark_gpu:
    name: GPU Pytest benchmark
    runs-on: linux.g5.4xlarge.nvidia.gpu
    defaults:
      run:
        shell: bash -l {0}
    container:
      image: nvidia/cuda:12.3.0-base-ubuntu22.04
      options: --gpus all
    steps:
      - name: Who triggered this?
        run: |
          echo "Action triggered by ${{ github.event.pull_request.html_url }}"
      - name: Install deps
        run: |
          export TZ=Europe/London
          export DEBIAN_FRONTEND=noninteractive  # tzdata bug
          apt-get update -y
          apt-get upgrade -y
          apt-get -y install gcc curl g++ unzip wget sudo git cmake
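      # ldd --version reports the container's glibc version, presumably as a sanity check that
      # prebuilt binary wheels will be loadable.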
      - name: Check ldd --version
        run: ldd --version
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 50  # this is to make sure we obtain the target base commit
      - name: Python Setup
        uses: actions/setup-python@v4
        with:
          python-version: 3.8
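      # Install Miniconda into $HOME; the following step puts it on PATH for later steps.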
      - name: Setup conda
        run: |
          rm -rf $HOME/miniconda
          wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh
          bash ~/miniconda.sh -b -p $HOME/miniconda
      - name: setup Path
        run: |
          echo "$HOME/miniconda/bin" >> $GITHUB_PATH
          echo "CONDA=$HOME/miniconda" >> $GITHUB_ENV
      - name: create and activate conda env
        run: |
          $HOME/miniconda/bin/conda create --name build_binary python=3.8
          $HOME/miniconda/bin/conda info
          $HOME/miniconda/bin/activate build_binary
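      # The checkout inside the container is owned by a different user, so the workspace must be
      # marked as a git safe.directory before any git command runs.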
      - name: Setup git
        run: git config --global --add safe.directory /__w/tensordict/tensordict
      - name: setup Path
        run: |
          echo /usr/local/bin >> $GITHUB_PATH
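      # Nightly PyTorch wheels built against CUDA 12.1 (cu121) come from the nightly index; tensordict
      # is then installed in editable mode together with the benchmark dependencies.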
      - name: Setup Environment
        run: |
          python -m pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu121
          python -m pip install -e .
          python -m pip install pytest pytest-benchmark
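      # Fail fast if the container cannot see a CUDA device before spending time on the benchmarks.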
      - name: check GPU presence
        run: |
          python -c "import torch; assert torch.cuda.device_count()"
      - name: Run benchmarks
        run: |
          cd benchmarks/
          python -m pytest -vvv --rank 0 --benchmark-json output.json
      - name: Store benchmark results
        uses: benchmark-action/github-action-benchmark@v1
        if: ${{ github.ref == 'refs/heads/main' || github.event_name == 'workflow_dispatch' }}
        with:
          name: GPU Benchmark Results
          tool: 'pytest'
          output-file-path: benchmarks/output.json
          fail-on-alert: true
          alert-threshold: '200%'
          alert-comment-cc-users: '@vmoens'
          comment-on-alert: true
          github-token: ${{ secrets.GITHUB_TOKEN }}
          gh-pages-branch: gh-pages
          auto-push: true