Documentation #655
Workflow file for this run
name: 🐧 CUDA

on: [push, pull_request]

concurrency:
  group: ${{ github.ref }}-${{ github.head_ref }}-cuda
  cancel-in-progress: true
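# Runs in the same concurrency group supersede each other: a new push to the
# same ref cancels the CUDA builds still in progress for the previous commit.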

jobs:
# Ref.:
#   https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/ubuntu18.04/10.1/base/Dockerfile
#   https://github.com/ComputationalRadiationPhysics/picongpu/blob/0.5.0/share/picongpu/dockerfiles/ubuntu-1604/Dockerfile
#   https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/
  build_nvcc:
    name: NVCC 11.0.3 SP
    runs-on: ubuntu-20.04
    if: github.event.pull_request.draft == false
    env:
      CXXFLAGS: "-Werror"
      CMAKE_GENERATOR: Ninja
      # setuptools/mpi4py work-around, see
      #   https://github.com/mpi4py/mpi4py/pull/159
      #   https://github.com/mpi4py/mpi4py/issues/157#issuecomment-1001022274
      SETUPTOOLS_USE_DISTUTILS: stdlib
    steps:
    - uses: actions/checkout@v3
    - uses: actions/setup-python@v4
      name: Install Python
      with:
        python-version: '3.x'
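    # The repository's dependency script below is expected to install the
    # CUDA 11.0 toolchain and build tools (assumption based on its file name).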
    - name: install dependencies
      run: |
        .github/workflows/dependencies/nvcc11-0.sh
    - name: CCache Cache
      uses: actions/cache@v3
      # - once stored under a key, caches become immutable (even if the local cache path content changes)
      # - for a refresh the key has to change, e.g., via the hash of a tracked file in the key
      with:
        path: |
          ~/.ccache
          ~/.cache/ccache
        key: ccache-cuda-nvcc-${{ hashFiles('.github/workflows/cuda.yml') }}-${{ hashFiles('cmake/dependencies/AMReX.cmake') }}
        restore-keys: |
          ccache-cuda-nvcc-${{ hashFiles('.github/workflows/cuda.yml') }}-
          ccache-cuda-nvcc-
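    # actions/cache restores the exact key on a hit; on a miss, the restore-keys
    # prefixes fall back to the most recent cache whose key starts with them.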
    - name: install openPMD-api
      run: |
        export CEI_SUDO="sudo"
        export CEI_TMP="/tmp/cei"
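        # cmake-easyinstall configures, builds, and installs openPMD-api 0.15.1
        # straight from the git tag into /usr/local, with Python bindings, tests,
        # examples, and CLI tools disabled and ccache as the compiler launcher.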
        cmake-easyinstall --prefix=/usr/local \
          git+https://github.com/openPMD/openPMD-api.git@0.15.1 \
          -DopenPMD_USE_PYTHON=OFF \
          -DBUILD_TESTING=OFF \
          -DBUILD_EXAMPLES=OFF \
          -DBUILD_CLI_TOOLS=OFF \
          -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \
          -DCMAKE_VERBOSE_MAKEFILE=ON
    - name: build WarpX
      run: |
        export PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH}
        export LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda/lib64:${LD_LIBRARY_PATH}
        which nvcc || echo "nvcc not in PATH!"
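        # Single-precision CUDA build for compute capability 6.0 (Pascal) with
        # embedded boundaries, the PSATD solver, and the external openPMD-api
        # installed above; the AMReX_CUDA_ERROR_* switches turn the corresponding
        # nvcc diagnostics into hard compile errors.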
        cmake -S . -B build_sp \
          -DCMAKE_VERBOSE_MAKEFILE=ON \
          -DWarpX_COMPUTE=CUDA \
          -DWarpX_EB=ON \
          -DWarpX_LIB=ON \
          -DAMReX_CUDA_ARCH=6.0 \
          -DWarpX_OPENPMD=ON \
          -DWarpX_openpmd_internal=OFF \
          -DWarpX_PRECISION=SINGLE \
          -DWarpX_PSATD=ON \
          -DAMReX_CUDA_ERROR_CROSS_EXECUTION_SPACE_CALL=ON \
          -DAMReX_CUDA_ERROR_CAPTURE_THIS=ON
        cmake --build build_sp -j 2
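        # Build the pywarpx wheel against the libraries just compiled;
        # PYWARPX_LIB_DIR points the Python package build at the prebuilt
        # artifacts in build_sp/lib so the wheel reuses them.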
        python3 -m pip install --upgrade pip setuptools wheel
        export WARPX_MPI=ON
        PYWARPX_LIB_DIR=$PWD/build_sp/lib python3 -m pip wheel .
        python3 -m pip install *.whl

  # make sure the legacy build system continues to build, i.e., that we don't
  # forget to add new .cpp files
  build_nvcc_gnumake:
    name: NVCC 11.8.0 GNUmake
    runs-on: ubuntu-20.04
    if: github.event.pull_request.draft == false
    steps:
    - uses: actions/checkout@v3
    - name: install dependencies
      run: |
        .github/workflows/dependencies/nvcc11-8.sh
    - name: CCache Cache
      uses: actions/cache@v3
      # - once stored under a key, caches become immutable (even if the local cache path content changes)
      # - for a refresh the key has to change, e.g., via the hash of a tracked file in the key
      with:
        path: |
          ~/.ccache
          ~/.cache/ccache
        key: ccache-cuda-gnumake-${{ hashFiles('.github/workflows/cuda.yml') }}-${{ hashFiles('cmake/dependencies/AMReX.cmake') }}
        restore-keys: |
          ccache-cuda-gnumake-${{ hashFiles('.github/workflows/cuda.yml') }}-
          ccache-cuda-gnumake-
    - name: build WarpX
      run: |
        export PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH}
        export LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda/lib64:${LD_LIBRARY_PATH}
        which nvcc || echo "nvcc not in PATH!"
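        # Check out a pinned AMReX commit next to the WarpX source so the
        # GNUmake build compiles against a known-good AMReX revision.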
        git clone https://github.com/AMReX-Codes/amrex.git ../amrex
        cd ../amrex && git checkout --detach 9d2f762f4897b5f585544baf87a19edd0f0c9560 && cd -
        make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2

  build_nvhpc21-11-nvcc:
    name: NVHPC@21.11 NVCC/NVC++ Release [tests]
    runs-on: ubuntu-20.04
    if: github.event.pull_request.draft == false
    env:
      # For NVHPC, Ninja is slower than the default generator:
      #CMAKE_GENERATOR: Ninja
      # setuptools/mpi4py work-around, see
      #   https://github.com/mpi4py/mpi4py/pull/159
      #   https://github.com/mpi4py/mpi4py/issues/157#issuecomment-1001022274
      SETUPTOOLS_USE_DISTUTILS: stdlib
    steps:
    - uses: actions/checkout@v3
    - name: Dependencies
      run: .github/workflows/dependencies/nvhpc.sh
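    # nvhpc.sh is expected to install the NVIDIA HPC SDK 21.11 (nvcc, nvc, nvc++)
    # that the build step below loads through the module system (assumption
    # based on the script name and the module path used there).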
    - name: CCache Cache
      uses: actions/cache@v3
      # - once stored under a key, caches become immutable (even if the local cache path content changes)
      # - for a refresh the key has to change, e.g., via the hash of a tracked file in the key
      with:
        path: |
          ~/.ccache
          ~/.cache/ccache
        key: ccache-cuda-nvhpc-${{ hashFiles('.github/workflows/cuda.yml') }}-${{ hashFiles('cmake/dependencies/AMReX.cmake') }}
        restore-keys: |
          ccache-cuda-nvhpc-${{ hashFiles('.github/workflows/cuda.yml') }}-
          ccache-cuda-nvhpc-
    - name: Build & Install
      run: |
        source /etc/profile.d/modules.sh
        module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/21.11
        which nvcc || echo "nvcc not in PATH!"
        which nvc++ || echo "nvc++ not in PATH!"
        which nvc || echo "nvc not in PATH!"
        nvcc --version
        nvc++ --version
        nvc --version
        cmake --version

        export CC=$(which nvc)
        export CXX=$(which nvc++)
        export CUDACXX=$(which nvcc)
        export CUDAHOSTCXX=${CXX}
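        # CC/CXX/CUDACXX/CUDAHOSTCXX are honored by CMake, so host code is compiled
        # with nvc/nvc++ and device code with nvcc in the configure step below;
        # AMReX_CUDA_ARCH=8.0 targets compute capability 8.0 (A100-class GPUs).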
        cmake -S . -B build \
          -DCMAKE_VERBOSE_MAKEFILE=ON \
          -DWarpX_COMPUTE=CUDA \
          -DWarpX_EB=ON \
          -DWarpX_LIB=ON \
          -DAMReX_CUDA_ARCH=8.0 \
          -DWarpX_OPENPMD=ON \
          -DWarpX_PSATD=ON \
          -DAMReX_CUDA_ERROR_CROSS_EXECUTION_SPACE_CALL=ON \
          -DAMReX_CUDA_ERROR_CAPTURE_THIS=ON
        cmake --build build -j 2

        # work-around for an mpi4py 3.1.1 build system issue when using
        # a GNU-built Python executable with non-GNU Python modules
        #   https://github.com/mpi4py/mpi4py/issues/114
        export CFLAGS="-noswitcherror"
        python3 -m pip install --upgrade pip setuptools wheel
        export WARPX_MPI=ON
        PYWARPX_LIB_DIR=$PWD/build/lib python3 -m pip wheel .
        python3 -m pip install *.whl