Support for GlobusComputeExecutor #4361

Workflow file for this run

name: Parsl

on:
  pull_request:
    types:
      - opened
      - synchronize
  merge_group:

jobs:
  main-test-suite:
    strategy:
      matrix:
        python-version: ["3.9", "3.10", "3.11", "3.12"]
    runs-on: ubuntu-20.04
    timeout-minutes: 60
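    # Note: the matrix above fans this job out into one independent run per listed
    # Python version; each run gets its own ubuntu-20.04 runner and 60-minute timeout.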
    steps:
      - uses: actions/checkout@master

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Collect Job Information
        id: job-info
        run: |
          echo "Python Version: ${{ matrix.python-version }}" >> ci_job_info.txt
          echo "CI Triggering Event: ${{ github.event_name }}" >> ci_job_info.txt
          echo "Triggering Git Ref: ${{ github.ref }}" >> ci_job_info.txt
          echo "Triggering Git SHA: ${{ github.sha }}" >> ci_job_info.txt
          echo "Workflow Run: ${{ github.run_number }}" >> ci_job_info.txt
          echo "Workflow Attempt: ${{ github.run_attempt }}" >> ci_job_info.txt
          as_ascii="$(echo "${{ github.ref_name }}" | perl -pe "s/[^A-z0-9-]+/-/g; s/^-+|-+\$//g; s/--+/-/g;")"
          echo "as-ascii=$as_ascii" >> $GITHUB_OUTPUT
      - name: Non-requirements based install
        run: |
          # libpython3.5: make workqueue binary installer happy
          # mpich: required by radical executor
          sudo apt-get update -q
          sudo apt-get install -qy libpython3.5 mpich
      - name: setup virtual env
        run: |
          make virtualenv
          source .venv/bin/activate
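      # Each `run:` block executes in a fresh shell, so the activation above does not
      # persist; later steps re-run `source .venv/bin/activate` themselves.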
      - name: make deps clean_coverage
        run: |
          source .venv/bin/activate
          make deps
          make clean_coverage
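      # Presumably `make deps` installs the test dependencies into the virtualenv and
      # `make clean_coverage` clears out stale coverage data; the repository Makefile
      # is the authoritative definition of these targets.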
      - name: make test
        run: |
          source .venv/bin/activate
          # temporary; until test-matrixification
          export PARSL_TEST_PRESERVE_NUM_RUNS=7
          make test
          ln -s pytest-parsl/parsltest-current test_runinfo
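      # The symlink above appears to give a stable `test_runinfo` path to the most
      # recent run directories; it is removed again at the end of the
      # "Documentation checks" step below.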
      - name: Documentation checks
        run: |
          source .venv/bin/activate
          pip install .[docs]
          sudo apt-get install -y pandoc
          cd docs
          test ! -e stubs
          # check that we can build the docs without warnings
          PYTHONPATH=/tmp/cctools/lib/python3.8/site-packages make SPHINXOPTS=-W html
          cd ..
          # assert that none of the runs in this test have written an ERROR message into a
          # database manager log file or monitoring router log file. It would be better if
          # the tests themselves failed immediately when there is a monitoring error, but
          # in the absence of that, this is a dirty way to check.
          bash -c '! grep ERROR pytest-parsl/parsltest-current/runinfo*/*/database_manager.log'
          bash -c '! grep ERROR pytest-parsl/parsltest-current/runinfo*/*/monitoring_router.log'
          # temporary; until test-matrixification
          rm -f pytest-parsl/parsltest-current test_runinfo
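      # The /tmp/cctools path on the Sphinx PYTHONPATH presumably points at a cctools
      # (Work Queue) installation so the docs build can import those bindings; this
      # workflow file does not show where /tmp/cctools is populated.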
      - name: Checking parsl-visualize
        run: |
          source .venv/bin/activate
          sudo apt-get install -y graphviz
          pip install .[monitoring,visualization]
          parsl/tests/test-viz.sh
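      # test-viz.sh is not shown in this workflow file; given the step name and the
      # monitoring and visualization extras installed above, it likely exercises the
      # parsl-visualize interface against logs from the earlier test run.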
      - name: make coverage
        run: |
          source .venv/bin/activate
          make coverage
      - name: Archive runinfo logs
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: runinfo-${{ matrix.python-version }}-${{ steps.job-info.outputs.as-ascii }}-${{ github.sha }}
          path: |
            runinfo/
            pytest-parsl/
            ci_job_info.txt
          compression-level: 9
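      # `if: always()` makes this upload run even when an earlier step fails, and
      # compression-level 9 asks actions/upload-artifact@v4 for its maximum
      # compression when packing the (potentially large) log directories.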