
Commit 2604ae4

Merge branch 'main' into references-update
2 parents 0639be1 + aaec3d3 commit 2604ae4

File tree: 148 files changed, +1528 -1095 lines


.github/workflows/ci.yml (+319 lines)
@@ -0,0 +1,319 @@
name: Run all checks

on:
  pull_request:
    branches:
      - main
  workflow_dispatch:
    inputs:
      ref:
        description: 'The git ref to build the package for'
        required: false
        default: ''
        type: string
      use_lkg:
        description: 'Whether to use the last known good versions of dependencies'
        required: false
        default: True
        type: boolean
  # nightly
  schedule:
    - cron: '0 0 * * *'

# Only run once per PR, canceling any previous runs
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
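# Illustrative note (editor's addition): for a pull request, github.head_ref is the PR's source
# branch, so the group key above resolves to something like "Run all checks-references-update"
# and a new push to the same PR cancels the run already in progress; for scheduled or manually
# dispatched runs head_ref is empty, so github.run_id makes each group unique and nothing is cancelled.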

# Precompute the ref if the workflow was triggered by a workflow dispatch rather than copying this logic repeatedly
env:
  ref: ${{ github.event_name == 'workflow_dispatch' && inputs.ref || null }}
  use_lkg: ${{ (github.event_name == 'workflow_dispatch' && inputs.use_lkg) || github.event_name == 'pull_request' }}

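# Note (editor's addition): workflow expressions have no ternary operator, so `A && B || C` is the
# usual "if A then B else C" idiom (it only misbehaves if B is itself falsy). Here `ref` is the
# dispatch input when the workflow is run manually and null otherwise, and `use_lkg` is true for
# pull requests and for dispatches that request it, but false for the nightly scheduled runs.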
jobs:
  eval:
    name: Evaluate changes
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        name: Checkout repository
        with:
          ref: ${{ env.ref }}
          fetch-depth: 2

      # We want to enforce the following rules for PRs:
      # * if all modifications are to README.md
      #   no testing is needed
      # * if there are modifications to docs/* or to any code
      #   then docs need to be built to verify consistency
      # * if there are modifications to notebooks/* or to any code
      #   then notebooks need to be run to verify consistency
      # * for any code changes (or changes to metadata files)
      #   linting and testing should be run
      # For a PR build, HEAD will be the merge commit, and we want to diff against the base branch,
      # which will be the first parent: HEAD^
      # (For non-PR changes, we will always perform all CI tasks)
      # Note that GitHub Actions provides path filters, but they operate at the workflow level, not the job level
      - run: |
          if ($env:GITHUB_EVENT_NAME -eq 'pull_request') {
            $editedFiles = git diff HEAD^ --name-only
            $editedFiles # echo edited files to enable easier debugging
            $codeChanges = $false
            $docChanges = $false
            $nbChanges = $false
            $changeType = "none"
            foreach ($file in $editedFiles) {
              switch -Wildcard ($file) {
                "README.md" { Continue }
                ".gitignore" { Continue }
                "econml/_version.py" { Continue }
                "prototypes/*" { Continue }
                "images/*" { Continue }
                "doc/*" { $docChanges = $true; Continue }
                "notebooks/*" { $nbChanges = $true; Continue }
                default { $codeChanges = $true; Continue }
              }
            }
          }
          echo "buildDocs=$(($env:GITHUB_EVENT_NAME -ne 'pull_request') -or ($docChanges -or $codeChanges))" >> $env:GITHUB_OUTPUT
          echo "buildNbs=$(($env:GITHUB_EVENT_NAME -ne 'pull_request') -or ($nbChanges -or $codeChanges))" >> $env:GITHUB_OUTPUT
          echo "testCode=$(($env:GITHUB_EVENT_NAME -ne 'pull_request') -or $codeChanges)" >> $env:GITHUB_OUTPUT
        shell: pwsh
        name: Determine type of code change
        id: eval
    outputs:
      buildDocs: ${{ steps.eval.outputs.buildDocs }}
      buildNbs: ${{ steps.eval.outputs.buildNbs }}
      testCode: ${{ steps.eval.outputs.testCode }}

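  # Note (editor's addition): the pwsh step above interpolates PowerShell booleans into the output
  # strings, so buildDocs/buildNbs/testCode arrive as the literal strings "True"/"False"; downstream
  # jobs therefore gate on a string comparison, e.g.
  #   if: ${{ needs.eval.outputs.testCode == 'True' }}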
  lint:
    name: Lint code
    needs: [eval]
    if: ${{ needs.eval.outputs.testCode == 'True' }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        name: Checkout repository
        with:
          ref: ${{ env.ref }}
      - uses: actions/setup-python@v4
        name: Setup Python
        with:
          python-version: '3.9'
      - run: python -m pip install --upgrade pip && pip install --upgrade setuptools
        name: Ensure latest pip and setuptools
      - run: 'pip install pycodestyle && pycodestyle econml'

  notebooks:
    name: Run notebooks
    needs: [eval]
    if: ${{ needs.eval.outputs.buildNbs == 'True' }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
        kind: [except-customer-scenarios, customer-scenarios]
        include:
          - kind: "except-customer-scenarios"
            extras: "[tf,plt]"
            pattern: "(?!CustomerScenarios)"
            install_graphviz: true
            version: '3.8' # no supported version of tensorflow for 3.9
          - kind: "customer-scenarios"
            extras: "[plt,dowhy]"
            pattern: "CustomerScenarios"
            version: '3.9'
            install_graphviz: false
      fail-fast: false
    steps:
      - uses: actions/checkout@v3
        name: Checkout repository
        with:
          ref: ${{ env.ref }}
      - uses: actions/setup-python@v4
        name: Setup Python
        with:
          python-version: ${{ matrix.version }}
      - run: python -m pip install --upgrade pip && pip install --upgrade setuptools
        name: Ensure latest pip and setuptools
      - run: sudo apt-get -yq install graphviz
        name: Install graphviz
        if: ${{ matrix.install_graphviz }}
      # Add verbose flag to pip installation if in debug mode
      - run: pip install -e .${{ matrix.extras }} ${{ fromJSON('["","-v"]')[runner.debug] }} ${{ env.use_lkg && '-r lkg-notebook.txt' }}
        name: Install econml
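      # Note (editor's addition): fromJSON('["","-v"]') builds a two-element array; indexing it with
      # runner.debug (set to 1 when debug logging is enabled, unset otherwise) adds the pip "-v"
      # flag only on debug re-runs. The trailing expression is intended to append
      # "-r lkg-notebook.txt" only when use_lkg is enabled, pinning the last-known-good
      # dependency versions recorded in that file.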
      # Install notebook requirements (if not already done as part of lkg)
      - run: pip install jupyter jupyter-client nbconvert nbformat seaborn xgboost tqdm
        name: Install notebook requirements
        if: ${{ !env.use_lkg }}
      - run: pip freeze --exclude-editable > notebooks-${{ matrix.version }}-${{ matrix.kind }}-requirements.txt
        name: Save installed packages
      - uses: actions/upload-artifact@v3
        name: Upload installed packages
        with:
          name: requirements
          path: notebooks-${{ matrix.version }}-${{ matrix.kind }}-requirements.txt
      - run: pip install pytest pytest-runner coverage
        name: Install pytest

      - run: python setup.py pytest
        name: Run notebook tests
        id: run_tests
        env:
          PYTEST_ADDOPTS: '-m "notebook"'
          NOTEBOOK_DIR_PATTERN: ${{ matrix.pattern }}
          COVERAGE_PROCESS_START: 'setup.cfg'
      - run: mv .coverage .coverage.${{ matrix.kind }}
        # Run whether or not the tests passed, but only if they ran at all
        if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
        name: Make coverage filename unique
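      # Note (editor's addition): the rename above gives each matrix job's .coverage data file a
      # unique suffix, so the uploaded artifacts from parallel jobs don't overwrite one another and
      # the coverage-report job can later merge them with `coverage combine`.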
      - uses: actions/upload-artifact@v3
        name: Upload coverage report
        if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
        with:
          name: coverage
          path: .coverage.${{ matrix.kind }}

  tests:
    name: "Run tests"
    needs: [eval]
    if: ${{ needs.eval.outputs.testCode == 'True' }}
    strategy:
      matrix:
        os: [ubuntu-latest, windows-latest, macos-latest]
        python-version: ['3.7', '3.8', '3.9', '3.10']
        kind: [serial, other, dml, main, treatment]
        exclude:
          # Serial tests fail randomly on mac sometimes, so we don't run them there
          - os: macos-latest
            kind: serial
          # Python 3.7 is broken on the mac runner image, see https://github.com/actions/runner-images/issues/7764
          - os: macos-latest
            python-version: '3.7'
        # Assign the correct package and testing options for each kind of test
        include:
          - kind: serial
            opts: '-m "serial" -n 1'
            extras: "[tf,plt]"
          - kind: other
            opts: '-m "cate_api" -n auto'
            extras: "[tf,plt]"
          - kind: dml
            opts: '-m "dml"'
            extras: "[tf,plt]"
          - kind: main
            opts: '-m "not (notebook or automl or dml or serial or cate_api or treatment_featurization)" -n 2'
            extras: "[tf,plt,dowhy]"
          - kind: treatment
            opts: '-m "treatment_featurization" -n auto'
            extras: "[tf,plt]"
      fail-fast: false
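      # Illustrative note (editor's addition): each include entry is merged into every generated
      # matrix combination with a matching kind, so e.g. { os: ubuntu-latest, python-version: '3.9',
      # kind: dml } also picks up opts: '-m "dml"' and extras: "[tf,plt]" from the list above.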
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v3
        name: Checkout repository
        with:
          ref: ${{ env.ref }}
      - uses: actions/setup-python@v4
        name: Setup Python
        with:
          python-version: ${{ matrix.python-version }}
      - run: python -m pip install --upgrade pip && pip install --upgrade setuptools
        name: Ensure latest pip and setuptools
      # Add verbose flag to pip installation if in debug mode
      - run: pip install -e .${{ matrix.extras }} ${{ fromJSON('["","-v"]')[runner.debug] }} ${{ env.use_lkg && '-r lkg.txt' }}
        name: Install econml
      - run: pip freeze --exclude-editable > tests-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.kind }}-requirements.txt
        name: Save installed packages
      - uses: actions/upload-artifact@v3
        name: Upload installed packages
        with:
          name: requirements
          path: tests-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.kind }}-requirements.txt
      - run: pip install pytest pytest-runner coverage
        name: Install pytest
      - run: python setup.py pytest
        name: Run tests
        id: run_tests
        env:
          PYTEST_ADDOPTS: ${{ matrix.opts }}
          COVERAGE_PROCESS_START: 'setup.cfg'
      - run: mv .coverage .coverage.${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.kind }}
        # Run whether or not the tests passed, but only if they ran at all
        if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
        name: Make coverage filename unique
      - uses: actions/upload-artifact@v3
        name: Upload coverage report
        if: success() || failure() && contains(fromJSON('["success", "failure"]'), steps.run_tests.outcome)
        with:
          name: coverage
          path: .coverage.${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.kind }}

  coverage-report:
    name: "Coverage report"
    needs: [tests, notebooks]
    if: success() || failure()
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        name: Checkout repository
        with:
          ref: ${{ env.ref }}
      - uses: actions/download-artifact@v3
        name: Get coverage reports
        with:
          name: coverage
          path: coverage
      - uses: actions/setup-python@v4
        name: Setup Python
        with:
          python-version: '3.8'
      - run: pip install coverage
        name: Install coverage
      - run: coverage combine coverage/
        name: Combine coverage reports
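      # Note (editor's addition): `coverage combine coverage/` merges the per-job .coverage.* data
      # files downloaded into the coverage/ directory into a single .coverage file, which the next
      # steps turn into a markdown summary (written to the job's step summary page via
      # $GITHUB_STEP_SUMMARY) and an HTML report.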
      - run: coverage report -m --format=markdown > $GITHUB_STEP_SUMMARY
        name: Generate coverage report
      - run: coverage html --fail-under=86
        name: Generate coverage html
      - uses: actions/upload-artifact@v3
        name: Upload coverage report
        with:
          name: coverage
          path: htmlcov

  build:
    name: Build package
    needs: [eval]
    if: ${{ needs.eval.outputs.testCode == 'True' }}
    uses: ./.github/workflows/publish-package.yml
    with:
      publish: false
      repository: testpypi
      # don't have access to env context here for some reason
      ref: ${{ github.event_name == 'workflow_dispatch' && inputs.ref || null }}
      use_lkg: ${{ (github.event_name == 'workflow_dispatch' && inputs.use_lkg) || github.event_name == 'pull_request' }}

  docs:
    name: Build documentation
    needs: [eval]
    if: ${{ needs.eval.outputs.buildDocs == 'True' }}
    uses: ./.github/workflows/publish-documentation.yml
    with:
      publish: false
      environment: test
      # don't have access to env context here for some reason
      ref: ${{ github.event_name == 'workflow_dispatch' && inputs.ref || null }}
      use_lkg: ${{ (github.event_name == 'workflow_dispatch' && inputs.use_lkg) || github.event_name == 'pull_request' }}
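  # Note (editor's addition): build and docs call reusable workflows via `uses:`, and the `with:`
  # inputs of a workflow-call job cannot read the workflow-level env context, which is why the ref
  # and use_lkg expressions are repeated verbatim here instead of referencing env.ref / env.use_lkg.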

  verify:
    name: Verify CI checks
    needs: [lint, notebooks, tests, build, docs]
    if: always()
    runs-on: ubuntu-latest
    steps:
      - run: exit 1
        name: At least one check failed or was cancelled
        if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}
      - run: exit 0
        name: All checks passed
        if: ${{ !(contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')) }}
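      # Note (editor's addition): needs.*.result is an object filter that collects each needed
      # job's result into an array, e.g. ["success", "success", "skipped", "success", "success"],
      # so this job fails exactly when any required check failed or was cancelled, while jobs
      # skipped by the eval gating still count as passing.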
