This repository was archived by the owner on Nov 15, 2023. It is now read-only.

Trial crates publishing automation #12880

Closed

342 changes: 179 additions & 163 deletions .gitlab-ci.yml
@@ -257,19 +257,19 @@ variables:

#### stage: .pre

skip-if-draft:
extends: .kubernetes-env
variables:
CI_IMAGE: "paritytech/tools:latest"
stage: .pre
rules:
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
script:
- echo "Commit message is ${CI_COMMIT_MESSAGE}"
- echo "Ref is ${CI_COMMIT_REF_NAME}"
- echo "pipeline source is ${CI_PIPELINE_SOURCE}"
- ./scripts/ci/gitlab/skip_if_draft.sh
allow_failure: true
# skip-if-draft:
# extends: .kubernetes-env
# variables:
# CI_IMAGE: "paritytech/tools:latest"
# stage: .pre
# rules:
# - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
# script:
# - echo "Commit message is ${CI_COMMIT_MESSAGE}"
# - echo "Ref is ${CI_COMMIT_REF_NAME}"
# - echo "pipeline source is ${CI_PIPELINE_SOURCE}"
# - ./scripts/ci/gitlab/skip_if_draft.sh
# allow_failure: true

check-crates-publishing-pipeline:
stage: .pre
@@ -283,155 +283,171 @@ check-crates-publishing-pipeline:
https://github.com/paritytech/releng-scripts.git
- ONLY_CHECK_PIPELINE=true ./releng-scripts/publish-crates

include:
# check jobs
- scripts/ci/gitlab/pipeline/check.yml
# tests jobs
- scripts/ci/gitlab/pipeline/test.yml
# build jobs
- scripts/ci/gitlab/pipeline/build.yml
# publish jobs
- scripts/ci/gitlab/pipeline/publish.yml
# zombienet jobs
- scripts/ci/gitlab/pipeline/zombienet.yml
# The crate-publishing pipeline requires a customized `interruptible` configuration. Unfortunately
# `interruptible` can't currently be dynamically set based on variables as per:
# - https://gitlab.com/gitlab-org/gitlab/-/issues/38349
# - https://gitlab.com/gitlab-org/gitlab/-/issues/194023
# Thus we work around that limitation by using conditional includes.
# For crate-publishing pipelines: run it with defaults + `interruptible: false`. The WHOLE
# pipeline is made uninterruptible to ensure that test jobs also get a chance to run to
# completion, because the publishing jobs depend on them AS INTENDED: crates should not be
# published before their source code is checked.
- local: scripts/ci/gitlab/crate-publishing-pipeline.yml
rules:
- if: $PIPELINE == "automatic-crate-publishing"
# For normal pipelines: run it with defaults + `interruptible: true`
- local: scripts/ci/gitlab/default-pipeline.yml
rules:
- if: $PIPELINE != "automatic-crate-publishing"
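
The two files included conditionally above are not part of this diff. Assuming they exist only to flip the pipeline-wide `interruptible` default, as the comment describes, their contents would presumably look like the following sketch (only the paths come from the includes above; the contents are an assumption):

# scripts/ci/gitlab/default-pipeline.yml -- illustrative sketch, assumed contents
default:
  interruptible: true

# scripts/ci/gitlab/crate-publishing-pipeline.yml -- illustrative sketch, assumed contents
default:
  interruptible: false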

#### stage: deploy

deploy-prometheus-alerting-rules:
stage: deploy
needs:
- job: test-prometheus-alerting-rules
artifacts: false
allow_failure: true
trigger:
project: parity/infrastructure/cloud-infra
variables:
SUBSTRATE_CI_COMMIT_NAME: "${CI_COMMIT_REF_NAME}"
SUBSTRATE_CI_COMMIT_REF: "${CI_COMMIT_SHORT_SHA}"
UPSTREAM_TRIGGER_PROJECT: "${CI_PROJECT_PATH}"
rules:
- if: $CI_PIPELINE_SOURCE == "pipeline"
when: never
- if: $CI_COMMIT_REF_NAME == "master"
changes:
- .gitlab-ci.yml
- ./scripts/ci/monitoring/**/*

#### stage: notify

# This job notifies rusty-cachier about the latest commit with the cache.
# This information is later used for cache distribution and overlay creation.
# Note that we don't use any .rusty-cachier references as we assume that a pipeline has reached this stage with working rusty-cachier.
rusty-cachier-notify:
stage: notify
extends: .kubernetes-env
publish-crates-locally:
extends:
- .test-refs
- .crates-publishing-template
timeout: 6h
variables:
CI_IMAGE: paritytech/rusty-cachier-env:latest
GIT_STRATEGY: none
dependencies: []
SPUB_PUBLISH_ALL: "true"
script:
- curl -s https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client/-/raw/release/util/install.sh | bash
- rusty-cachier cache notify

#### stage: .post

# This job cancels the whole pipeline if any of provided jobs fail.
# In a DAG, every job chain is executed independently of the others. The `fail_fast` principle suggests
# failing the pipeline as soon as possible to shorten the feedback loop.
.cancel-pipeline-template:
stage: .post
rules:
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
when: on_failure
variables:
PROJECT_ID: "${CI_PROJECT_ID}"
PROJECT_NAME: "${CI_PROJECT_NAME}"
PIPELINE_ID: "${CI_PIPELINE_ID}"
FAILED_JOB_URL: "${FAILED_JOB_URL}"
FAILED_JOB_NAME: "${FAILED_JOB_NAME}"
PR_NUM: "${PR_NUM}"
trigger:
project: "parity/infrastructure/ci_cd/pipeline-stopper"

remove-cancel-pipeline-message:
stage: .post
rules:
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
variables:
PROJECT_ID: "${CI_PROJECT_ID}"
PROJECT_NAME: "${CI_PROJECT_NAME}"
PIPELINE_ID: "${CI_PIPELINE_ID}"
FAILED_JOB_URL: "https://gitlab.com"
FAILED_JOB_NAME: "nope"
PR_NUM: "${CI_COMMIT_REF_NAME}"
trigger:
project: "parity/infrastructure/ci_cd/pipeline-stopper"
branch: "as-improve"

# We need to copy jobs this way because otherwise GitLab will wait
# for all 3 jobs to finish instead of cancelling if one fails
cancel-pipeline-test-linux-stable1:
extends: .cancel-pipeline-template
needs:
- job: "test-linux-stable 1/3"

cancel-pipeline-test-linux-stable2:
extends: .cancel-pipeline-template
needs:
- job: "test-linux-stable 2/3"

cancel-pipeline-test-linux-stable3:
extends: .cancel-pipeline-template
needs:
- job: "test-linux-stable 3/3"

cancel-pipeline-cargo-check-benches1:
extends: .cancel-pipeline-template
needs:
- job: "cargo-check-benches 1/2"

cancel-pipeline-cargo-check-benches2:
extends: .cancel-pipeline-template
needs:
- job: "cargo-check-benches 2/2"

cancel-pipeline-test-linux-stable-int:
extends: .cancel-pipeline-template
needs:
- job: test-linux-stable-int

cancel-pipeline-cargo-check-each-crate-1:
extends: .cancel-pipeline-template
needs:
- job: "cargo-check-each-crate 1/2"

cancel-pipeline-cargo-check-each-crate-2:
extends: .cancel-pipeline-template
needs:
- job: "cargo-check-each-crate 2/2"

cancel-pipeline-cargo-check-each-crate-macos:
extends: .cancel-pipeline-template
needs:
- job: cargo-check-each-crate-macos

cancel-pipeline-check-tracing:
extends: .cancel-pipeline-template
needs:
- job: check-tracing
- rusty-cachier snapshot create
- git clone
--depth 1
--branch crates-publishing
https://github.com/paritytech/releng-scripts.git
- CRATESIO_TARGET_INSTANCE=local ./releng-scripts/publish-crates
- rusty-cachier cache upload
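
The publish-crates-locally job above exercises the full publishing flow against a local registry on ordinary test refs, while the real crate-publishing variant of the pipeline is selected by the $PIPELINE variable referenced in the include rules earlier. That variable has to be supplied when the pipeline is created; below is a purely illustrative sketch of one way to do so, assuming a scheduled parent pipeline and a hypothetical trigger job (neither is part of this change):

# Illustrative sketch only -- the job name, rule, and trigger mechanism are assumptions.
trigger-automatic-crate-publishing:
  stage: .pre
  rules:
    - if: $CI_PIPELINE_SOURCE == "schedule"
  variables:
    # Forwarded to the child pipeline, where the conditional includes then
    # select the uninterruptible crate-publishing configuration.
    PIPELINE: "automatic-crate-publishing"
  trigger:
    include: .gitlab-ci.yml
    strategy: depend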

# include:
# # check jobs
# - scripts/ci/gitlab/pipeline/check.yml
# # tests jobs
# - scripts/ci/gitlab/pipeline/test.yml
# # build jobs
# - scripts/ci/gitlab/pipeline/build.yml
# # publish jobs
# - scripts/ci/gitlab/pipeline/publish.yml
# # zombienet jobs
# - scripts/ci/gitlab/pipeline/zombienet.yml
# # The crate-publishing pipeline requires a customized `interruptible` configuration. Unfortunately
# # `interruptible` can't currently be dynamically set based on variables as per:
# # - https://gitlab.com/gitlab-org/gitlab/-/issues/38349
# # - https://gitlab.com/gitlab-org/gitlab/-/issues/194023
# # Thus we work around that limitation by using conditional includes.
# # For crate-publishing pipelines: run it with defaults + `interruptible: false`. The WHOLE
# # pipeline is made uninterruptible to ensure that test jobs also get a chance to run to
# # completion, because the publishing jobs depend on them AS INTENDED: crates should not be
# # published before their source code is checked.
# - local: scripts/ci/gitlab/crate-publishing-pipeline.yml
# rules:
# - if: $PIPELINE == "automatic-crate-publishing"
# # For normal pipelines: run it with defaults + `interruptible: true`
# - local: scripts/ci/gitlab/default-pipeline.yml
# rules:
# - if: $PIPELINE != "automatic-crate-publishing"
#
# #### stage: deploy
#
# deploy-prometheus-alerting-rules:
# stage: deploy
# needs:
# - job: test-prometheus-alerting-rules
# artifacts: false
# allow_failure: true
# trigger:
# project: parity/infrastructure/cloud-infra
# variables:
# SUBSTRATE_CI_COMMIT_NAME: "${CI_COMMIT_REF_NAME}"
# SUBSTRATE_CI_COMMIT_REF: "${CI_COMMIT_SHORT_SHA}"
# UPSTREAM_TRIGGER_PROJECT: "${CI_PROJECT_PATH}"
# rules:
# - if: $CI_PIPELINE_SOURCE == "pipeline"
# when: never
# - if: $CI_COMMIT_REF_NAME == "master"
# changes:
# - .gitlab-ci.yml
# - ./scripts/ci/monitoring/**/*
#
# #### stage: notify
#
# # This job notifies rusty-cachier about the latest commit with the cache.
# # This information is later used for cache distribution and overlay creation.
# # Note that we don't use any .rusty-cachier references as we assume that a pipeline has reached this stage with working rusty-cachier.
# rusty-cachier-notify:
# stage: notify
# extends: .kubernetes-env
# variables:
# CI_IMAGE: paritytech/rusty-cachier-env:latest
# GIT_STRATEGY: none
# dependencies: []
# script:
# - curl -s https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client/-/raw/release/util/install.sh | bash
# - rusty-cachier cache notify
#
# #### stage: .post
#
# # This job cancels the whole pipeline if any of provided jobs fail.
# # In a DAG, every job chain is executed independently of the others. The `fail_fast` principle suggests
# # failing the pipeline as soon as possible to shorten the feedback loop.
# .cancel-pipeline-template:
# stage: .post
# rules:
# - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
# when: on_failure
# variables:
# PROJECT_ID: "${CI_PROJECT_ID}"
# PROJECT_NAME: "${CI_PROJECT_NAME}"
# PIPELINE_ID: "${CI_PIPELINE_ID}"
# FAILED_JOB_URL: "${FAILED_JOB_URL}"
# FAILED_JOB_NAME: "${FAILED_JOB_NAME}"
# PR_NUM: "${PR_NUM}"
# trigger:
# project: "parity/infrastructure/ci_cd/pipeline-stopper"
#
# remove-cancel-pipeline-message:
# stage: .post
# rules:
# - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
# variables:
# PROJECT_ID: "${CI_PROJECT_ID}"
# PROJECT_NAME: "${CI_PROJECT_NAME}"
# PIPELINE_ID: "${CI_PIPELINE_ID}"
# FAILED_JOB_URL: "https://gitlab.com"
# FAILED_JOB_NAME: "nope"
# PR_NUM: "${CI_COMMIT_REF_NAME}"
# trigger:
# project: "parity/infrastructure/ci_cd/pipeline-stopper"
# branch: "as-improve"
#
# # We need to copy jobs this way because otherwise GitLab will wait
# # for all 3 jobs to finish instead of cancelling if one fails
# cancel-pipeline-test-linux-stable1:
# extends: .cancel-pipeline-template
# needs:
# - job: "test-linux-stable 1/3"
#
# cancel-pipeline-test-linux-stable2:
# extends: .cancel-pipeline-template
# needs:
# - job: "test-linux-stable 2/3"
#
# cancel-pipeline-test-linux-stable3:
# extends: .cancel-pipeline-template
# needs:
# - job: "test-linux-stable 3/3"
#
# cancel-pipeline-cargo-check-benches1:
# extends: .cancel-pipeline-template
# needs:
# - job: "cargo-check-benches 1/2"
#
# cancel-pipeline-cargo-check-benches2:
# extends: .cancel-pipeline-template
# needs:
# - job: "cargo-check-benches 2/2"
#
# cancel-pipeline-test-linux-stable-int:
# extends: .cancel-pipeline-template
# needs:
# - job: test-linux-stable-int
#
# cancel-pipeline-cargo-check-each-crate-1:
# extends: .cancel-pipeline-template
# needs:
# - job: "cargo-check-each-crate 1/2"
#
# cancel-pipeline-cargo-check-each-crate-2:
# extends: .cancel-pipeline-template
# needs:
# - job: "cargo-check-each-crate 2/2"
#
# cancel-pipeline-cargo-check-each-crate-macos:
# extends: .cancel-pipeline-template
# needs:
# - job: cargo-check-each-crate-macos
#
# cancel-pipeline-check-tracing:
# extends: .cancel-pipeline-template
# needs:
# - job: check-tracing