diff --git a/.github/workflows/notify_test_workflow.yml b/.github/workflows/notify_test_workflow.yml
index 2e459ca8204d..da93088c5dfe 100644
--- a/.github/workflows/notify_test_workflow.yml
+++ b/.github/workflows/notify_test_workflow.yml
@@ -5,6 +5,7 @@ on:
 
 jobs:
   notify:
+    name: Notify test workflow
    runs-on: ubuntu-20.04
    steps:
      - name: "Notify test workflow"
@@ -14,6 +15,10 @@ jobs:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const endpoint = "GET /repos/:owner/:repo/actions/workflows/:id/runs?&branch=:branch"
+
+            // TODO: Should this use pull_request.user and pull_request.user.repos_url?
+            // If a different person creates a commit in another forked repo,
+            // it could still be detected.
            const params = {
              owner: context.payload.pull_request.head.repo.owner.login,
              repo: context.payload.pull_request.head.repo.name,
@@ -22,19 +27,28 @@ jobs:
            }
            const runs = await github.request(endpoint, params)
-            var runID = runs.data.workflow_runs[0].id
+            const runID = runs.data.workflow_runs[0].id
+            const runUrl = "https://github.com/"
+              + context.payload.pull_request.head.repo.full_name
+              + "/actions/runs/"
+              + runID
 
-            var msg = "**[Test build #" + runID + "]"
-              + "(https://github.com/" + context.payload.pull_request.head.repo.full_name
-              + "/actions/runs/" + runID + ")** "
-              + "for PR " + context.issue.number
-              + " at commit [`" + context.payload.pull_request.head.sha.substring(0, 7) + "`]"
-              + "(https://github.com/" + context.payload.pull_request.head.repo.full_name
-              + "/commit/" + context.payload.pull_request.head.sha + ")."
+            const name = 'Build and test'
+            const head_sha = context.payload.pull_request.head.sha
+            const status = 'in_progress'
 
-            github.issues.createComment({
-              issue_number: context.issue.number,
-              owner: context.payload.repository.owner.login,
-              repo: context.payload.repository.name,
-              body: msg
+            github.checks.create({
+              ...context.repo,
+              name,
+              head_sha,
+              status,
+              output: {
+                title: 'Test results',
+                summary: runUrl,
+                text: JSON.stringify({
+                  owner: context.payload.pull_request.head.repo.owner.login,
+                  repo: context.payload.pull_request.head.repo.name,
+                  run_id: runID
+                })
+              }
            })
 
diff --git a/.github/workflows/update_build_status.yml b/.github/workflows/update_build_status.yml
new file mode 100644
index 000000000000..d2e920d8b97b
--- /dev/null
+++ b/.github/workflows/update_build_status.yml
@@ -0,0 +1,54 @@
+name: Update build status
+
+on:
+  schedule:
+    - cron: "*/15 * * * *"
+
+jobs:
+  update:
+    name: Update build status
+    runs-on: ubuntu-20.04
+    steps:
+      - name: "Update build status"
+        uses: actions/github-script@v3
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            const endpoint = "GET /repos/:owner/:repo/pulls?state=:state"
+            const params = {
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              state: 'open'
+            }
+
+            const maybeReady = ["clean", "has_hooks", "unknown", "unstable"];
+            const notReady = ["dirty", "draft"];
+
+            for await (const prs of github.paginate.iterator(endpoint, params)) {
+              for await (const pr of prs.data) {
+                if (pr.mergeable_state == null || maybeReady.includes(pr.mergeable_state)) {
+                  const checkRuns = await github.request('GET /repos/{owner}/{repo}/commits/{ref}/check-runs', {
+                    owner: context.repo.owner,
+                    repo: context.repo.repo,
+                    ref: pr.head.sha
+                  })
+
+                  for await (const cr of checkRuns.data.check_runs) {
+                    if (cr.name == "Build and test") {
+                      const params = JSON.parse(cr.output.text) // output.text carries the request parameters as JSON
+                      const run = await github.request('GET /repos/{owner}/{repo}/actions/runs/{run_id}', params)
+                      const response = await github.request('PATCH /repos/{owner}/{repo}/check-runs/{check_run_id}', {
+                        owner: context.repo.owner,
+                        repo: context.repo.repo,
+                        check_run_id: cr.id,
+                        output: cr.output,
+                        status: run.data.status,
+                        conclusion: run.data.conclusion
+                      })
+
+                      break
+                    }
+                  }
+                }
+              }
+            }
diff --git a/README.md b/README.md
index aa7d1dd338be..c46a010af968 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,6 @@
 # Apache Spark
 
+B
 Spark is a unified analytics engine for large-scale data processing. It provides
 high-level APIs in Scala, Java, Python, and R, and an optimized engine that
 supports general computation graphs for data analysis. It also supports a