From 089de29b49048e274935a447ebdd45ce826c72fc Mon Sep 17 00:00:00 2001 From: Shaun Crampton Date: Mon, 18 Nov 2024 13:37:45 -0700 Subject: [PATCH 01/11] Add migration to tiers. (#9493) - Add tests to verify that all resource types are covered. - Clean up unused resource names. Fix formatting. --- .../commands/datastore/migrate/export.go | 47 ++++++++--------- .../commands/datastore/migrate/export_test.go | 50 +++++++++++++++++-- .../datastore/migrate/migrate_suite_test.go | 2 +- .../datastore/migrate/migrateipam_test.go | 7 ++- libcalico-go/lib/backend/model/resource.go | 8 +++ 5 files changed, 79 insertions(+), 35 deletions(-) diff --git a/calicoctl/calicoctl/commands/datastore/migrate/export.go b/calicoctl/calicoctl/commands/datastore/migrate/export.go index 07ff976d8e9..045a45a9f30 100644 --- a/calicoctl/calicoctl/commands/datastore/migrate/export.go +++ b/calicoctl/calicoctl/commands/datastore/migrate/export.go @@ -48,38 +48,35 @@ var title = cases.Title(language.English) var allV3Resources []string = []string{ "ippools", "bgppeers", + "tiers", // Must come before policies since policies reference tiers. "globalnetworkpolicies", "globalnetworksets", - "heps", - "kubecontrollersconfigs", + "hostendpoints", + "kubecontrollersconfigurations", "networkpolicies", "networksets", - "nodes", - "bgpconfigs", - "felixconfigs", + "nodes", // Must be before resources that reference nodes. 
+ "bgpconfigurations", + "felixconfigurations", "ipreservations", "bgpfilters", } var resourceDisplayMap map[string]string = map[string]string{ - "ipamBlocks": "IPAMBlocks", - "blockaffinities": "BlockAffinities", - "ipamhandles": "IPAMHandles", - "ipamconfigs": "IPAMConfigurations", - "ippools": "IPPools", - "bgpconfigs": "BGPConfigurations", - "bgppeers": "BGPPeers", - "clusterinfos": "ClusterInformations", - "felixconfigs": "FelixConfigurations", - "globalnetworkpolicies": "GlobalNetworkPolicies", - "globalnetworksets": "GlobalNetworkSets", - "heps": "HostEndpoints", - "kubecontrollersconfigs": "KubeControllersConfigurations", - "networkpolicies": "NetworkPolicies", - "networksets": "Networksets", - "nodes": "Nodes", - "ipreservations": "IPReservations", - "bgpfilters": "BGPFilters", + "ippools": "IPPools", + "bgpconfigurations": "BGPConfigurations", + "bgppeers": "BGPPeers", + "felixconfigurations": "FelixConfigurations", + "globalnetworkpolicies": "GlobalNetworkPolicies", + "globalnetworksets": "GlobalNetworkSets", + "hostendpoints": "HostEndpoints", + "kubecontrollersconfigurations": "KubeControllersConfigurations", + "networkpolicies": "NetworkPolicies", + "networksets": "NetworkSets", + "nodes": "Nodes", + "ipreservations": "IPReservations", + "bgpfilters": "BGPFilters", + "tiers": "Tiers", } var namespacedResources map[string]struct{} = map[string]struct{}{ @@ -289,7 +286,7 @@ Description: // Felix configs may also need to be modified if node names do not match the Kubernetes node names. // Felix configs must come after nodes in the allV3Resources list since we populate the node mapping when nodes are exported. - if r == "felixconfigs" { + if r == "felixconfigurations" { err := meta.EachListItem(resource, func(obj runtime.Object) error { felixConfig, ok := obj.(*apiv3.FelixConfiguration) if !ok { @@ -315,7 +312,7 @@ Description: // BGP configs may also need to be modified if node names do not match the Kubernetes node names. 
// BGP configs must come after nodes in the allV3Resources list since we populate the node mapping when nodes are exported. - if r == "bgpconfigs" { + if r == "bgpconfigurations" { err := meta.EachListItem(resource, func(obj runtime.Object) error { bgpConfig, ok := obj.(*apiv3.BGPConfiguration) if !ok { diff --git a/calicoctl/calicoctl/commands/datastore/migrate/export_test.go b/calicoctl/calicoctl/commands/datastore/migrate/export_test.go index 9224d7c45f8..b42c60b528a 100644 --- a/calicoctl/calicoctl/commands/datastore/migrate/export_test.go +++ b/calicoctl/calicoctl/commands/datastore/migrate/export_test.go @@ -12,14 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -package migrate_test +package migrate import ( + "strings" + . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3" - "github.com/projectcalico/calico/calicoctl/calicoctl/commands/datastore/migrate" + "github.com/projectcalico/calico/libcalico-go/lib/backend/model" + "github.com/projectcalico/calico/libcalico-go/lib/set" ) var _ = Describe("Etcd to KDD Migration Export handling", func() { @@ -33,7 +36,7 @@ var _ = Describe("Etcd to KDD Migration Export handling", func() { IptablesFilterDenyAction: "DROP", } - migrate.ConvertIptablesFields(felixConfig) + ConvertIptablesFields(felixConfig) Expect(felixConfig.Spec.DefaultEndpointToHostAction).To(Equal("Drop")) Expect(felixConfig.Spec.IptablesFilterAllowAction).To(Equal("Accept")) Expect(felixConfig.Spec.IptablesMangleAllowAction).To(Equal("Return")) @@ -49,7 +52,7 @@ var _ = Describe("Etcd to KDD Migration Export handling", func() { IptablesFilterDenyAction: "Drop", } - migrate.ConvertIptablesFields(felixConfig) + ConvertIptablesFields(felixConfig) Expect(felixConfig.Spec.DefaultEndpointToHostAction).To(Equal("Drop")) Expect(felixConfig.Spec.IptablesFilterAllowAction).To(Equal("Accept")) 
Expect(felixConfig.Spec.IptablesMangleAllowAction).To(Equal("Return")) @@ -60,11 +63,48 @@ var _ = Describe("Etcd to KDD Migration Export handling", func() { felixConfig := apiv3.NewFelixConfiguration() felixConfig.Spec = apiv3.FelixConfigurationSpec{} - migrate.ConvertIptablesFields(felixConfig) + ConvertIptablesFields(felixConfig) Expect(felixConfig.Spec.DefaultEndpointToHostAction).To(Equal("")) Expect(felixConfig.Spec.IptablesFilterAllowAction).To(Equal("")) Expect(felixConfig.Spec.IptablesMangleAllowAction).To(Equal("")) Expect(felixConfig.Spec.IptablesFilterDenyAction).To(Equal("")) }) }) + + It("should cover all calico resources", func() { + allPlurals := set.FromArray(model.AllResourcePlurals()) + + // Profiles are backed by k8s resources in KDD. User cannot create + // their own. + allPlurals.Discard("profiles") + // WEPs are backed by Pods in KDD. + allPlurals.Discard("workloadendpoints") + // ClusterInformation is generated fresh in the new cluster. + allPlurals.Discard("clusterinformations") + // Not supported in KDD (OpenStack only). + allPlurals.Discard("caliconodestatuses") + // Handled by IPAM migration code. + allPlurals.Discard("ipamconfigs") + allPlurals.Discard("blockaffinities") + + allPlurals.Iter(func(resource string) error { + if strings.HasPrefix(resource, "kubernetes") { + // "kubernetes"-prefixed resources are backed by Kubernetes API + // objects, not Calico objects. 
+ return set.RemoveItem + } + return nil + }) + + Expect(allV3Resources).To(ConsistOf(allPlurals.Slice())) + }) + + It("should have names for all resources", func() { + var keys []string + for k := range resourceDisplayMap { + keys = append(keys, k) + } + Expect(keys).To(ConsistOf(allV3Resources), + "expected to see names for the listed calico resources (only)") + }) }) diff --git a/calicoctl/calicoctl/commands/datastore/migrate/migrate_suite_test.go b/calicoctl/calicoctl/commands/datastore/migrate/migrate_suite_test.go index ea61e600ad7..d649ca94b7a 100644 --- a/calicoctl/calicoctl/commands/datastore/migrate/migrate_suite_test.go +++ b/calicoctl/calicoctl/commands/datastore/migrate/migrate_suite_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package migrate_test +package migrate import ( "testing" diff --git a/calicoctl/calicoctl/commands/datastore/migrate/migrateipam_test.go b/calicoctl/calicoctl/commands/datastore/migrate/migrateipam_test.go index 7ee476e830f..5a4c07440d6 100644 --- a/calicoctl/calicoctl/commands/datastore/migrate/migrateipam_test.go +++ b/calicoctl/calicoctl/commands/datastore/migrate/migrateipam_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package migrate_test +package migrate import ( "context" @@ -21,7 +21,6 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" - "github.com/projectcalico/calico/calicoctl/calicoctl/commands/datastore/migrate" bapi "github.com/projectcalico/calico/libcalico-go/lib/backend/api" "github.com/projectcalico/calico/libcalico-go/lib/backend/model" client "github.com/projectcalico/calico/libcalico-go/lib/clientv3" @@ -99,7 +98,7 @@ var _ = Describe("IPAM migration handling", func() { bc := NewMockIPAMBackendClient(blocks, affinities, handles) client := NewMockIPAMClient(bc) - migrateIPAM := migrate.NewMigrateIPAM(client) + migrateIPAM := NewMigrateIPAM(client) migrateIPAM.SetNodeMap(map[string]string{nodeName: newNodeName}) err := migrateIPAM.PullFromDatastore() Expect(err).NotTo(HaveOccurred()) @@ -144,7 +143,7 @@ var _ = Describe("IPAM migration handling", func() { bc := NewMockIPAMBackendClient(blocks, affinities, handles) client := NewMockIPAMClient(bc) - migrateIPAM := migrate.NewMigrateIPAM(client) + migrateIPAM := NewMigrateIPAM(client) migrateIPAM.SetNodeMap(map[string]string{nodeName: nodeName}) err := migrateIPAM.PullFromDatastore() Expect(err).NotTo(HaveOccurred()) diff --git a/libcalico-go/lib/backend/model/resource.go b/libcalico-go/lib/backend/model/resource.go index 15da7e35bc1..71f509aa815 100644 --- a/libcalico-go/lib/backend/model/resource.go +++ b/libcalico-go/lib/backend/model/resource.go @@ -57,6 +57,14 @@ func registerResourceInfo(kind string, plural string, typeOf reflect.Type) { resourceInfoByPlural[plural] = ri } +func AllResourcePlurals() []string { + plurals := make([]string, 0, len(resourceInfoByPlural)) + for plural := range resourceInfoByPlural { + plurals = append(plurals, plural) + } + return plurals +} + func init() { registerResourceInfo( apiv3.KindBGPPeer, From 24d0909bf29b8b5aa5660e9abb0815cadd947a70 Mon Sep 17 00:00:00 2001 From: "tuti." 
Date: Tue, 19 Nov 2024 08:27:47 -0800 Subject: [PATCH 02/11] fix release notes for hashrelease (#9502) --- release/build/main.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/release/build/main.go b/release/build/main.go index 76034008284..a3a8777bd75 100644 --- a/release/build/main.go +++ b/release/build/main.go @@ -270,7 +270,11 @@ func hashreleaseSubCommands(cfg *config.Config) []*cli.Command { // For real releases, release notes are generated prior to building the release. For hash releases, // generate a set of release notes and add them to the hashrelease directory. - if _, err := outputs.ReleaseNotes(c.String(orgFlag), cfg.GithubToken, cfg.RepoRootDir, filepath.Join(dir, releaseNotesDir), versions.ProductVersion); err != nil { + releaseVersion, err := version.DetermineReleaseVersion(versions.ProductVersion, cfg.DevTagSuffix) + if err != nil { + return fmt.Errorf("Failed to determine release version: %v", err) + } + if _, err := outputs.ReleaseNotes(c.String(orgFlag), cfg.GithubToken, cfg.RepoRootDir, filepath.Join(dir, releaseNotesDir), releaseVersion); err != nil { return err } From 0883212889c6f88cca02089963c117ca11ac2878 Mon Sep 17 00:00:00 2001 From: "tuti." Date: Tue, 19 Nov 2024 11:49:24 -0800 Subject: [PATCH 03/11] [release tool] update releasing calico (#9487) * update releasing calico - release as draft release - update ghr version to latest (v0.17.0) * update releasing docs --- Makefile | 2 +- metadata.mk | 1 + release/RELEASING.md | 155 +++++++++++++------------- release/pkg/manager/calico/manager.go | 1 + 4 files changed, 81 insertions(+), 78 deletions(-) diff --git a/Makefile b/Makefile index ada87db080d..767256aa351 100644 --- a/Makefile +++ b/Makefile @@ -119,7 +119,7 @@ release/bin/release: $(shell find ./release -type f -name '*.go') # Install ghr for publishing to github. 
bin/ghr: - $(DOCKER_RUN) -e GOBIN=/go/src/$(PACKAGE_NAME)/bin/ $(CALICO_BUILD) go install github.com/tcnksm/ghr@v0.14.0 + $(DOCKER_RUN) -e GOBIN=/go/src/$(PACKAGE_NAME)/bin/ $(CALICO_BUILD) go install github.com/tcnksm/ghr@$(GHR_VERSION) # Build a release. release: release/bin/release diff --git a/metadata.mk b/metadata.mk index 56d752e793b..ec3c2badbce 100644 --- a/metadata.mk +++ b/metadata.mk @@ -18,6 +18,7 @@ KINDEST_NODE_VERSION=v1.30.4 KIND_VERSION=v0.24.0 PROTOC_VER=v0.1 UBI_VERSION=8.10 +GHR_VERSION=v0.17.0 # Configuration for Semaphore/Github integration. ORGANIZATION = projectcalico diff --git a/release/RELEASING.md b/release/RELEASING.md index 251c7c2ae1b..75fe79d0769 100644 --- a/release/RELEASING.md +++ b/release/RELEASING.md @@ -1,4 +1,4 @@ -# How to release Calico +# Releasing Calico > **NOTE:** These instructions apply only to Calico versions v3.21 or greater. > For older releases, refer to the instructions in the corresponding `release-vX.Y` branch. @@ -9,8 +9,7 @@ 1. [Verify the code is ready for release](#2-verify-the-code-is-ready-for-release) 1. [Create a release branch](#3-create-a-release-branch) 1. [Performing a release](#4-performing-a-release) -1. [Promoting to be the latest release in the docs](#5-promoting-to-be-the-latest-release-in-the-docs) -1. [Post-release](#6-post-release) +1. [Post-release](#5-post-release) ## 1. Prerequisites @@ -23,19 +22,19 @@ To publish Calico, you need **the following permissions**: - Push access to the Calico DockerHub repositories. Assuming you've been granted access by an admin: - ``` + ```sh docker login ``` - Push access to the Calico quay.io repositories. Assuming you've been granted access by an admin: - ``` + ```sh docker login quay.io ``` - Push access to the gcr.io/projectcalico-org repositories. **Note:** Some of the repos do not yet support credential helpers, you must use one of the token-based logins. 
For example, assuming you've been granted access, this will configure a short-lived auth token: - ``` + ```sh gcloud auth print-access-token | docker login -u oauth2accesstoken --password-stdin https://gcr.io ``` @@ -44,23 +43,21 @@ To publish Calico, you need **the following permissions**: - You must be able to access binaries.projectcalico.org. - To publish the helm release to the repo, you’ll need an AWS helm profile: - Add this to your ~/.aws/config - ``` - [profile helm] - role_arn = arn:aws:iam:::role/CalicoDevHelmAdmin - mfa_serial = arn:aws:iam:::mfa/myusername - source_profile = default - region = us-east-2 - ``` - Your user will need permission for assuming the helm admin role in the production account. + Add this to your `~/.aws/config` -You'll also need **several GB of disk space**. + ```sh + [profile helm] + role_arn = arn:aws:iam:::role/CalicoDevHelmAdmin + mfa_serial = arn:aws:iam:::mfa/myusername + source_profile = default + region = us-east-2 + ``` -Some of the release scripts also require **tools to be installed** in your dev environment: + Your user will need permission for assuming the helm admin role in the production account. -- [Install and configure](https://github.com/github/hub#installation) the GitHub `hub` tool. +You'll also need **several GB of disk space**. -Finally, the release process **assumes that your repos are checked out with name `origin`** for the git remote +Finally, the release process **assumes that your repos are checked out with name `origin`** as the git remote for the main Calico repo. ## 2. Verify the code is ready for release @@ -85,17 +82,17 @@ When starting development on a new minor release, the first step is to create a 1. Create a new branch off of the latest master and publish it, along with a dev tag for the next release. - ``` + ```sh git checkout master && git pull origin master ``` - ``` + ```sh make create-release-branch ``` 1. Checkout the newly created branch. 
- ``` + ```sh git checkout release-vX.Y ``` @@ -107,33 +104,22 @@ When starting development on a new minor release, the first step is to create a Then, run manifest generation - ``` + ```sh make generate ``` Commit your changes - ``` + ```sh Update manifests for release-vX.Y ``` Then, push your changes to the branch. - ``` + ```sh git push origin release-vX.Y ``` -### Setting up netlify - -1. On netlify create a new site using the `release-vX.Y` branch (You should at least have write access to this repo for site creation) - -1. Rename the randomly generated site name to follow the same naming convention as other releases (Ex: `calico-vX-Y`). - -1. Ensure that the site is generated properly by visiting site URL (Ex. https://calico-vX-Y.netlify.app/archive/vX.Y/). - -1. Cherry-pick the proxy rules commit created earlier to the latest production branch, as well as `master`. - This will make the candidate site docs available at `projectcalico.docs.tigera.io/archive/vX.Y/` (Note: the trailing slash) - ### Updating milestones for the new branch Once a new branch is cut, we need to ensure a new milestone exists to represent the next release that will be cut from the master branch. @@ -146,13 +132,13 @@ Once a new branch is cut, we need to ensure a new milestone exists to represent ### 4.a Create a temporary branch for this release against origin -1. Create a new branch based off of `release-vX.Y`. +1. Create a new branch `build-vX.Y.Z` based off of `release-vX.Y`. - ``` + ```sh git checkout release-vX.Y && git pull origin release-vX.Y ``` - ``` + ```sh git checkout -b build-vX.Y.Z ``` @@ -163,7 +149,7 @@ Once a new branch is cut, we need to ensure a new milestone exists to represent 1. Update manifests (and other auto-generated code) by running the following command in the repository root. 
- ``` + ```sh make generate ``` @@ -171,18 +157,20 @@ Once a new branch is cut, we need to ensure a new milestone exists to represent Then, add the newly created release note file to git. - ``` + ```sh git add release-notes/-release-notes.md ``` 1. Commit your changes. For example: - ``` + ```sh git commit -m "Updates for vX.Y.Z" ``` 1. Push the branch to `github.com/projectcalico/calico` and create a pull request. Get it reviewed and ensure it passes CI before moving to the next step. +1. If this is the first release from this release branch i.e. `vX.Y.0`, create a new Calico X.Y.x PPA in launchpad + ### 4.b Build and publish the repository in Semaphore To build and publish the release artifacts, find the desired commit [in Semaphore](https://tigera.semaphoreci.com/projects/calico), verify that all tests for that @@ -194,24 +182,9 @@ Wait for this job to complete before moving on to the next step. Follow [the tigera/operator release instructions](https://github.com/tigera/operator/blob/master/RELEASING.md). -### 4.d Build and publish OpenStack packages +### 4.d Publish the release on Github -1. Check out the release tag in the `projectcalico/calico` repository. - - ``` - git fetch origin --tags && git checkout vX.Y.Z - ``` - -1. In your environment, set `HOST` to the GCP name for binaries.projectcalico.org, `GCLOUD_ARGS` to the `--zone` and `--project` args needed to access that host, and `SECRET_KEY` to - the secret key for a GPG identity that you have uploaded to your Launchpad account. - -1. Establish GCP credentials so that gcloud with `HOST` and `GCLOUD_ARGS` can access binaries.projectcalico.org. - -1. Build OpenStack packages from the checked out commit. - - ``` - make -C release/packaging release-publish VERSION=vX.Y.Z - ``` +Go to the [Calico release page](https://github.com/projectcalico/calico/releases) and publish the draft release. 
### 4.e Update the docs with the new version @@ -231,7 +204,7 @@ Follow [the tigera/operator release instructions](https://github.com/tigera/oper 1. Run the post-release checks. The release validation checks will run - they check for the presence of all the required binaries tarballs, tags, etc. - ``` + ```sh make VERSION=... FLANNEL_VERSION=... OPERATOR_VERSION=... postrelease-checks ``` @@ -239,6 +212,46 @@ Follow [the tigera/operator release instructions](https://github.com/tigera/oper 1. Kick off some e2e tests to test the contents of the release. +### Update API repository + +The `projectcalico/api` repository needs to be updated to stay in sync with the Calico API. + +**First**, ensure that you have [Github CLI tool](https://github.com/cli/cli#installation) + +1. Clone the API repository + + ```sh + git clone git@github.com:projectcalico/api.git + ``` + +1. Create or checkout the release branch `release-vX.Y`. + + For a major/minor release: + + ```sh + git checkout -b release-vX.Y && git push origin release-vX.Y + ``` + + For a patch release: + + ```sh + git checkout release-vX.Y && git pull origin release-vX.Y + ``` + +1. Update APIs by running the following command + + ```sh + make -f Makefile.local pr CALICO_GIT_REF=vX.Y.Z` + ``` + + This runs a script that clones `projectcalico/calico`, import the updated files and creates a PR. + + > NOTE: if an auto-api PR already exists for this version, + > it will print an error about the PR existing already. + > The existing PR still gets updated with changes + +1. Get the PR reviewed, approved and merged + # Release notes Release notes for a Calico release contain notable changes across Calico repositories. To write release notes for a given version, perform the following steps. @@ -247,7 +260,7 @@ Release notes for a Calico release contain notable changes across Calico reposit Use this URL to query for PRs, replacing `vX.Y.Z` with your desired version. 
- ``` + ```sh https://github.com/issues?utf8=%E2%9C%93&q=user%3Aprojectcalico+milestone%3A%22Calico+vX.Y.Z%22+ ``` @@ -259,7 +272,7 @@ Release notes for a Calico release contain notable changes across Calico reposit 1. Run the following command to collect all release notes for the given version. - ``` + ```sh make release-notes ``` @@ -272,23 +285,11 @@ Release notes for a Calico release contain notable changes across Calico reposit Consistent release note formatting is important. Here are some examples for reference: - - [Example release notes for a major/minor release](https://github.com/projectcalico/calico/blob/v3.1.0/_includes/v3.1/release-notes/v3.1.0-release-notes.md) - - [Example release notes for a patch release](https://github.com/projectcalico/calico/blob/7d5594dbca14cb1b765b65eb11bdd8239d23dfb3/_includes/v3.0/release-notes/v3.0.5-release-notes.md) + - [Example release notes for a major/minor release](https://github.com/projectcalico/calico/blob/v3.28.0/release-notes/v3.28.0-release-notes.md) + - [Example release notes for a patch release](https://github.com/projectcalico/calico/blob/v3.28.2/release-notes/v3.28.1-release-notes.md) 1. Add the generated file to git. - ``` + ```sh git add release-notes/ ``` - -# API Repository update - -The `projectcalico/api` repository needs to be updated to stay in sync with the Calico API. The following steps will ensure that: - -1. Ensure that you have the `gh` tool installed, configured, and authenticated. Instructions are here: https://cli.github.com/manual/ -1. Clone the repository: `git clone -b release-v3.29 git@github.com:projectcalico/api.git calico-api-v3.29` -2. `cd` into the repository and run the `pr` make target from `Makefile.local`: `make -f Makefile.local pr CALICO_GIT_REF=`. `CALICO_GIT_REF` should be the tag for this release, e.g. `CALICO_GIT_REF=v3.29.0` if that's the version you just released. -3. The script will clone the upstream repository (i.e. 
`projectcalico/calico`), import the updated files, commit them, and create a PR for them -7. Once this is done, it will output a URL for a PR, which you can then review and get approved. - -Note that if an auto-api PR already exists for this minor version, it will print an error about the PR existing already; this is fine, and the script will have updated the PR instead. Go to Github, find the PR manually, and review it to ensure everything looks correct, then have it merged. diff --git a/release/pkg/manager/calico/manager.go b/release/pkg/manager/calico/manager.go index df04ba52da7..5e5b12c51af 100644 --- a/release/pkg/manager/calico/manager.go +++ b/release/pkg/manager/calico/manager.go @@ -754,6 +754,7 @@ Additional links: "-repository", r.repo, "-name", ver, "-body", releaseNote, + "-draft", ver, r.uploadDir(), } From c89284d37f2947f7b7f491dfff08624e1dc77c4c Mon Sep 17 00:00:00 2001 From: Tanuj Dwivedi Date: Wed, 20 Nov 2024 21:51:11 +0530 Subject: [PATCH 04/11] Fix: announcement of /32 and /128 entries for serviceExternalIPs (#9422) --- confd/pkg/backends/calico/client.go | 31 ++++--- confd/pkg/backends/calico/routes.go | 48 ++++++++++- confd/pkg/backends/calico/routes_test.go | 105 ++++++++++++++++++++--- 3 files changed, 155 insertions(+), 29 deletions(-) diff --git a/confd/pkg/backends/calico/client.go b/confd/pkg/backends/calico/client.go index 6595446aefb..70f501ac7b3 100644 --- a/confd/pkg/backends/calico/client.go +++ b/confd/pkg/backends/calico/client.go @@ -1409,7 +1409,11 @@ func getCommunitiesArray(communitiesSet set.Set[string]) []string { } func (c *client) onExternalIPsUpdate(externalIPs []string) { - if err := c.updateGlobalRoutes(externalIPs, c.ExternalIPRouteIndex); err == nil { + // ExternalIPs which are single addresses need to be advertised from every node for services of type "Cluster" + // and from only individual nodes with service present for services of type "Local". 
Both of these will be handled + // within the routeGenerator and should not be exposed as globalRoutes. + globalExtIPs := filterNonSingleIPsFromCIDRs(externalIPs) + if err := c.updateGlobalRoutes(globalExtIPs, c.ExternalIPRouteIndex); err == nil { c.externalIPs = externalIPs c.externalIPNets = parseIPNets(c.externalIPs) log.Infof("Updated with new external IP CIDRs: %s", externalIPs) @@ -1432,16 +1436,7 @@ func (c *client) onLoadBalancerIPsUpdate(lbIPs []string) { // However, we don't want to advertise single IPs in this way because it breaks any "local" type services the user creates, // which should instead be advertised from only a subset of nodes. // So, we handle advertisement of any single-addresses found in the config on a per-service basis from within the routeGenerator. - var globalLbIPs []string - for _, lbIP := range lbIPs { - if strings.Contains(lbIP, ":") { - if !strings.HasSuffix(lbIP, "/128") { - globalLbIPs = append(globalLbIPs, lbIP) - } - } else if !strings.HasSuffix(lbIP, "/32") { - globalLbIPs = append(globalLbIPs, lbIP) - } - } + globalLbIPs := filterNonSingleIPsFromCIDRs(lbIPs) if err := c.updateGlobalRoutes(globalLbIPs, c.LoadBalancerIPRouteIndex); err == nil { c.loadBalancerIPs = lbIPs c.loadBalancerIPNets = parseIPNets(c.loadBalancerIPs) @@ -1874,6 +1869,20 @@ func withDefault(val, dflt string) string { return dflt } +func filterNonSingleIPsFromCIDRs(ipCidrs []string) []string { + var nonSingleIPs []string + for _, ip := range ipCidrs { + if strings.Contains(ip, ":") { + if !strings.HasSuffix(ip, "/128") { + nonSingleIPs = append(nonSingleIPs, ip) + } + } else if !strings.HasSuffix(ip, "/32") { + nonSingleIPs = append(nonSingleIPs, ip) + } + } + return nonSingleIPs +} + // Checks whether or not a key references sensitive information (like a BGP password) so that // logging output for the field can be redacted. 
func (c *client) isSensitive(path string) bool { diff --git a/confd/pkg/backends/calico/routes.go b/confd/pkg/backends/calico/routes.go index 3b5103b68e8..1e47d1be77b 100644 --- a/confd/pkg/backends/calico/routes.go +++ b/confd/pkg/backends/calico/routes.go @@ -377,6 +377,32 @@ func (rg *routeGenerator) isSingleLoadBalancerIP(loadBalancerIP string) bool { return false } +// isSingleExternalIP determines if the given IP is in the list of +// allowed ExternalIP CIDRs given in the default bgpconfiguration +// and is a single IP entry (/32 for IPV4 or /128 for IPV6) +func (rg *routeGenerator) isSingleExternalIP(externalIP string) bool { + if externalIP == "" { + log.Debug("Skip empty service External IP") + return false + } + ip := net.ParseIP(externalIP) + if ip == nil { + log.Errorf("Could not parse service External IP: %s", externalIP) + return false + } + + for _, allowedNet := range rg.client.GetExternalIPs() { + if allowedNet.Contains(ip) { + if ones, bits := allowedNet.Mask.Size(); ones == bits { + return true + } + } + } + + // Guilty until proven innocent + return false +} + // addFullIPLength returns a new slice, with the full IP length appended onto every item. func addFullIPLength(items []string) []string { res := make([]string, 0) @@ -423,10 +449,24 @@ func (rg *routeGenerator) advertiseThisService(svc *v1.Service, ep *v1.Endpoints return false } - // we need to announce single IPs for services of type LoadBalancer and externalTrafficPolicy Cluster - if svc.Spec.Type == v1.ServiceTypeLoadBalancer && svc.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeCluster && rg.isSingleLoadBalancerIP(svc.Spec.LoadBalancerIP) { - logc.Debug("Advertising load balancer of type cluster because of single IP definition") - return true + // we need to announce single IPs for services of type externalTrafficPolicy Cluster. + // There are 2 cases inside this type: + // - LoadBalancer with a single IP. 
+ // - Any one of externalIPs in service of type LoadBalancer or NodePort with a single IP. + if svc.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeCluster { + if svc.Spec.Type == v1.ServiceTypeLoadBalancer && rg.isSingleLoadBalancerIP(svc.Spec.LoadBalancerIP) { + logc.Debug("Advertising load balancer of type cluster because of single IP definition") + return true + } + + if svc.Spec.Type == v1.ServiceTypeLoadBalancer || svc.Spec.Type == v1.ServiceTypeNodePort { + for _, extIP := range svc.Spec.ExternalIPs { + if rg.isSingleExternalIP(extIP) { + logc.Debug("Advertising external IP of type cluster because of single IP definition") + return true + } + } + } } // we only need to advertise local services, since we advertise the entire cluster IP range. diff --git a/confd/pkg/backends/calico/routes_test.go b/confd/pkg/backends/calico/routes_test.go index 9e3b4666b62..a36146280fe 100644 --- a/confd/pkg/backends/calico/routes_test.go +++ b/confd/pkg/backends/calico/routes_test.go @@ -27,6 +27,9 @@ const ( // Specific IP for loadbalancer IP test. loadBalancerIP1 = "172.217.4.10" + + // externalIP3 for single external IP test. 
+ externalIP3 = "45.12.70.7" ) func addEndpointSubset(ep *v1.Endpoints, nodename string) { @@ -95,6 +98,23 @@ func buildSimpleService3() (svc *v1.Service, ep *v1.Endpoints) { return } +func buildSimpleService4() (svc *v1.Service, ep *v1.Endpoints) { + meta := metav1.ObjectMeta{Namespace: "foo", Name: "ext"} + svc = &v1.Service{ + ObjectMeta: meta, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeLoadBalancer, + ClusterIP: "127.0.0.11", + ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal, + ExternalIPs: []string{externalIP3}, + }, + } + ep = &v1.Endpoints{ + ObjectMeta: meta, + } + return +} + var _ = Describe("RouteGenerator", func() { var rg *routeGenerator var expectedSvcRouteMap map[string]bool @@ -230,30 +250,36 @@ var _ = Describe("RouteGenerator", func() { Describe("resourceInformerHandlers", func() { var ( - svc, svc2, svc3 *v1.Service - ep, ep2, ep3 *v1.Endpoints + svc, svc2, svc3, svc4 *v1.Service + ep, ep2, ep3, ep4 *v1.Endpoints ) BeforeEach(func() { svc, ep = buildSimpleService() svc2, ep2 = buildSimpleService2() svc3, ep3 = buildSimpleService3() + svc4, ep4 = buildSimpleService4() addEndpointSubset(ep, rg.nodeName) addEndpointSubset(ep2, rg.nodeName) addEndpointSubset(ep3, rg.nodeName) + addEndpointSubset(ep4, rg.nodeName) err := rg.epIndexer.Add(ep) Expect(err).NotTo(HaveOccurred()) err = rg.epIndexer.Add(ep2) Expect(err).NotTo(HaveOccurred()) err = rg.epIndexer.Add(ep3) Expect(err).NotTo(HaveOccurred()) + err = rg.epIndexer.Add(ep4) + Expect(err).NotTo(HaveOccurred()) err = rg.svcIndexer.Add(svc) Expect(err).NotTo(HaveOccurred()) err = rg.svcIndexer.Add(svc2) Expect(err).NotTo(HaveOccurred()) err = rg.svcIndexer.Add(svc3) Expect(err).NotTo(HaveOccurred()) + err = rg.svcIndexer.Add(svc4) + Expect(err).NotTo(HaveOccurred()) }) It("should remove advertised IPs when endpoints are deleted", func() { @@ -526,9 +552,9 @@ var _ = Describe("RouteGenerator", func() { By("onExternalIPsUpdate to include /32 route") 
rg.client.onExternalIPsUpdate([]string{externalIPRangeSingle}) - // Expect that we advertise the /32 given to us via BGPConfiguration. - Expect(rg.client.cache[key]).To(Equal(externalIP1 + "/32")) - Expect(rg.client.programmedRouteRefCount[key]).To(Equal(1)) + // Expect that we don't advertise the /32 given to us via BGPConfiguration. We do that via route generator. + Expect(rg.client.cache[key]).To(Equal("")) + Expect(rg.client.programmedRouteRefCount[key]).To(Equal(0)) // Trigger programming of routes from the route generator again. This time, the service's externalIP // will be allowed by BGPConfiguration and so it should be programmed. @@ -536,9 +562,9 @@ var _ = Describe("RouteGenerator", func() { rg.resyncKnownRoutes() // Expect that we continue to advertise the route, but the refcount should indicate a route received - // from both the RouteGenerator and BGPConfiguration. + // from only the RouteGenerator. Expect(rg.client.cache[key]).To(Equal(externalIP1 + "/32")) - Expect(rg.client.programmedRouteRefCount[key]).To(Equal(2)) + Expect(rg.client.programmedRouteRefCount[key]).To(Equal(1)) // Simulate an event from the syncer which updates the range. It still includes the original IP, // to ensure we don't trigger the route generator to withdraw its route. @@ -546,8 +572,7 @@ var _ = Describe("RouteGenerator", func() { rg.client.onExternalIPsUpdate([]string{externalIPRange1}) rg.resyncKnownRoutes() - // The route should still exist, since the RouteGenerator's route is still valid. However, - // its reference count should be decremented back to one. + // The route should still exist, since the RouteGenerator's route is still valid. 
Expect(rg.client.cache[key]).To(Equal(externalIP1 + "/32")) Expect(rg.client.programmedRouteRefCount[key]).To(Equal(1)) @@ -555,16 +580,15 @@ var _ = Describe("RouteGenerator", func() { By("onExternalIPsUpdate to include /32 route again") rg.client.onExternalIPsUpdate([]string{externalIPRangeSingle}) rg.resyncKnownRoutes() - Expect(rg.client.programmedRouteRefCount[key]).To(Equal(2)) + Expect(rg.client.programmedRouteRefCount[key]).To(Equal(1)) - // Now, remove both services (since both contribute externalIP). Ensure that the route is still programmed - // (via BGPConfiguration), but the ref count should once again drop to 1. + // Now, remove both services (since both contribute externalIP). Route should not be programmed anymore. By("Deleting svc") rg.onSvcDelete(svc) By("Deleting svc2") rg.onSvcDelete(svc2) - Expect(rg.client.cache[key]).To(Equal(externalIP1 + "/32")) - Expect(rg.client.programmedRouteRefCount[key]).To(Equal(1)) + Expect(rg.client.cache[key]).To(Equal("")) + Expect(rg.client.programmedRouteRefCount[key]).To(Equal(0)) // Finally, remove BGPConfiguration. It should withdraw the route // and delete the refcount entry. @@ -625,6 +649,59 @@ var _ = Describe("RouteGenerator", func() { Expect(rg.client.cache).NotTo(HaveKey(key)) Expect(rg.client.programmedRouteRefCount).NotTo(HaveKey(key)) }) + + // This test simulates a situation where BGPConfiguration has a /32 route that exactly matches + // externalIP of a LoadBalancer service with ExternalTrafficPolicy set to Local. The route should only be advertised + // when the Service is created, and not when the BGPConfiguration is created. + It("should handle /32 routes for externalIPs", func() { + // BeforeEach creates a service. Remove it before the test, since we want to start + // this test without the service in place. svc4 is a LoadBalancer service with external traffic + // policy of Local and an externalIP. 
+ err := rg.epIndexer.Delete(ep4) + Expect(err).NotTo(HaveOccurred()) + err = rg.svcIndexer.Delete(svc4) + Expect(err).NotTo(HaveOccurred()) + + // The key we expect to be used for the LB IP. + key := "/calico/staticroutes/" + externalIP3 + "-32" + + // Trigger programming of valid routes from the route generator for any known services. + // We don't have a BGPConfiguration update or services yet, so we shouldn't receive any routes. + By("Resyncing routes at start of test") + rg.resyncKnownRoutes() + Expect(rg.client.cache[key]).To(Equal("")) + Expect(rg.client.programmedRouteRefCount[key]).To(Equal(0)) + + // Simulate an event from the syncer which sets the external IP range containing only the service's externalIP. + // We use a /32 route to trigger the situation under test. + externalIPRangeSingle := fmt.Sprintf("%s/32", externalIP3) + By("onExternalIPsUpdate to include /32 route") + rg.client.onExternalIPsUpdate([]string{externalIPRangeSingle}) + rg.resyncKnownRoutes() + + // No routes should be advertised yet. + Expect(rg.client.cache[key]).To(Equal("")) + Expect(rg.client.programmedRouteRefCount[key]).To(Equal(0)) + + // Now add the service. + err = rg.epIndexer.Add(ep4) + Expect(err).NotTo(HaveOccurred()) + err = rg.svcIndexer.Add(svc4) + Expect(err).NotTo(HaveOccurred()) + + // Expect that we advertise the /32 external IP from the Service. + By("Resyncing routes from route generator") + rg.resyncKnownRoutes() + Expect(rg.client.cache[key]).To(Equal(externalIP3 + "/32")) + Expect(rg.client.programmedRouteRefCount[key]).To(Equal(1)) + + // Finally, remove BGPConfiguration. It should withdraw the route + // and delete the refcount entry. 
+ rg.client.onExternalIPsUpdate([]string{}) + rg.resyncKnownRoutes() + Expect(rg.client.cache).NotTo(HaveKey(key)) + Expect(rg.client.programmedRouteRefCount).NotTo(HaveKey(key)) + }) }) }) }) From ff48d0f5ecf20a092bc600181a9b1b118fb29754 Mon Sep 17 00:00:00 2001 From: Daniel Fox Date: Wed, 20 Nov 2024 11:59:18 -0800 Subject: [PATCH 05/11] Add release-build and release-publish targets to key-cert-provisioner (#9463) * Add release-build and release-publish targets * Remove duplicate make target --- key-cert-provisioner/Makefile | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/key-cert-provisioner/Makefile b/key-cert-provisioner/Makefile index 35846f4df44..b621b031bec 100644 --- a/key-cert-provisioner/Makefile +++ b/key-cert-provisioner/Makefile @@ -55,11 +55,9 @@ $(BINDIR)/test-signer-$(ARCH): # BUILD IMAGE ############################################################################### .PHONY: image-all -image-all: $(addprefix sub-image-,$(VALIDARCHES)) sub-image-fips-amd64 +image-all: $(addprefix sub-image-,$(VALIDARCHES)) sub-image-%: $(MAKE) image ARCH=$* -sub-image-fips-%: - $(MAKE) image FIPS=true ARCH=$* SIGNER_CREATED=.signer.created-$(ARCH) @@ -93,3 +91,20 @@ clean: -docker image rm -f $$(docker images $(KEY_CERT_PROVISIONER_IMAGE) -a -q) -docker image rm -f $$(docker images $(TEST_SIGNER_IMAGE) -a -q) +############################################################################### +# Release +############################################################################### +## Produces a clean build of release artifacts at the specified version. +release-build: .release-$(VERSION).created +.release-$(VERSION).created: + $(MAKE) clean image-all RELEASE=true + $(MAKE) retag-build-images-with-registries IMAGETAG=$(VERSION) RELEASE=true + # Generate the `latest` images. 
+ $(MAKE) retag-build-images-with-registries IMAGETAG=latest RELEASE=true + touch $@ + +## Pushes a github release and release artifacts produced by `make release-build`. +release-publish: release-prereqs .release-$(VERSION).published +.release-$(VERSION).published: + $(MAKE) push-images-to-registries push-manifests IMAGETAG=$(VERSION) RELEASE=$(RELEASE) CONFIRM=$(CONFIRM) + touch $@ From 2ea7981da4b06a8e0cd2b3686e18821afa6b2c0c Mon Sep 17 00:00:00 2001 From: Alex O Regan Date: Fri, 22 Nov 2024 11:25:09 +0000 Subject: [PATCH 06/11] amend gaugeNumRules value when a chain becomes referenced/unreferenced (#9374) --- felix/fv/infrastructure/felix.go | 15 +++ felix/fv/pre_dnat_test.go | 179 +++++++++++++++++++++++++++++++ felix/iptables/table.go | 15 ++- 3 files changed, 206 insertions(+), 3 deletions(-) diff --git a/felix/fv/infrastructure/felix.go b/felix/fv/infrastructure/felix.go index f54705389a1..02f20195797 100644 --- a/felix/fv/infrastructure/felix.go +++ b/felix/fv/infrastructure/felix.go @@ -548,6 +548,21 @@ func (f *Felix) IPTablesChains(table string) map[string][]string { return out } +// AllCalicoIPTablesRules returns a flat slice of all 'cali-*' rules in a table. +func (f *Felix) AllCalicoIPTablesRules(table string) []string { + chains := f.IPTablesChains(table) + var allRules []string + for _, chain := range chains { + for _, rule := range chain { + if strings.Contains(rule, "cali-") { + allRules = append(allRules, rule) + } + } + } + + return allRules +} + func (f *Felix) PromMetric(name string) PrometheusMetric { return PrometheusMetric{ f: f, diff --git a/felix/fv/pre_dnat_test.go b/felix/fv/pre_dnat_test.go index 3ca5a2de053..377896cadae 100644 --- a/felix/fv/pre_dnat_test.go +++ b/felix/fv/pre_dnat_test.go @@ -17,6 +17,7 @@ package fv_test import ( + "fmt" "strconv" "time" @@ -24,6 +25,7 @@ import ( . 
"github.com/onsi/gomega" api "github.com/projectcalico/api/pkg/apis/projectcalico/v3" "github.com/projectcalico/api/pkg/lib/numorstring" + "github.com/sirupsen/logrus" "github.com/projectcalico/calico/felix/fv/connectivity" "github.com/projectcalico/calico/felix/fv/containers" @@ -32,6 +34,7 @@ import ( "github.com/projectcalico/calico/felix/fv/workload" "github.com/projectcalico/calico/libcalico-go/lib/apiconfig" client "github.com/projectcalico/calico/libcalico-go/lib/clientv3" + "github.com/projectcalico/calico/libcalico-go/lib/options" ) // Setup for planned further FV tests: @@ -63,6 +66,8 @@ var _ = infrastructure.DatastoreDescribe("pre-dnat with initialized Felix, 2 wor options := infrastructure.DefaultTopologyOptions() // For variety, run this test with IPv6 disabled. options.EnableIPv6 = false + options.ExtraEnvVars["FELIX_PrometheusMetricsEnabled"] = "true" + tc, client = infrastructure.StartSingleNodeTopology(options, infra) // Install a default profile that allows all ingress and egress, in the absence of any Policy. 
@@ -209,11 +214,14 @@ var _ = infrastructure.DatastoreDescribe("pre-dnat with initialized Felix, 2 wor policy.Spec.ApplyOnForward = true protocol := numorstring.ProtocolFromString("tcp") ports := numorstring.SinglePort(8055) + metricsPort := numorstring.SinglePort(9091) + policy.Spec.Ingress = []api.Rule{{ Action: api.Allow, Protocol: &protocol, Destination: api.EntityRule{Ports: []numorstring.Port{ ports, + metricsPort, }}, }} policy.Spec.Selector = "has(host-endpoint)" @@ -221,6 +229,15 @@ var _ = infrastructure.DatastoreDescribe("pre-dnat with initialized Felix, 2 wor Expect(err).NotTo(HaveOccurred()) }) + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + logrus.WithFields(logrus.Fields{ + "filter": tc.Felixes[0].IPTablesChains("filter"), + "mangle": tc.Felixes[0].IPTablesChains("mangle"), + }).Debug("dumping iptables") + } + }) + It("external client cannot connect", func() { cc := &connectivity.Checker{} cc.ExpectSome(w[0], w[1], 32011) @@ -229,6 +246,168 @@ var _ = infrastructure.DatastoreDescribe("pre-dnat with initialized Felix, 2 wor cc.ExpectNone(externalClient, w[0], 32010) cc.CheckConnectivity() }) + + It("increments Prometheus gauge proportionally to programming rules", func() { + dataplaneProgrammed := func() bool { + // Check if a particular known chain has been programmed into the mangle table. + mangleChains := tc.Felixes[0].IPTablesChains("mangle") + if _, ok := mangleChains["cali-fh-eth0"]; !ok { + return false + } + + if len(mangleChains["cali-fh-eth0"]) == 0 { + return false + } + + return true + } + Eventually(dataplaneProgrammed).Should(BeTrue(), "Dataplane never got fully programmed") + + // A test-HEP to apply. + hep := api.NewHostEndpoint() + hep.Name = "t0" + hep.Spec.Node = tc.Felixes[0].Hostname + hep.Labels = map[string]string{"abc123": "true"} + hep.Spec.InterfaceName = "*" + + // A test-GNP for the HEP. 
+ policy := api.NewGlobalNetworkPolicy() + policy.Name = "allow-ingress-8055-1" + order := float64(11) + policy.Spec.Order = &order + policy.Spec.PreDNAT = true + policy.Spec.ApplyOnForward = true + protocol := numorstring.ProtocolFromString("tcp") + testPort := numorstring.SinglePort(9999) + ports := numorstring.SinglePort(8055) + metricsPort := numorstring.SinglePort(9091) + policy.Spec.Ingress = []api.Rule{{ + Action: api.Allow, + Protocol: &protocol, + Destination: api.EntityRule{Ports: []numorstring.Port{ + testPort, + ports, + metricsPort, + }}, + }} + policy.Spec.Selector = "has(abc123)" + // The same GNP but with stateful fields set. + var appliedGNP *api.GlobalNetworkPolicy + + mangleRulesMetric := tc.Felixes[0].PromMetric("felix_iptables_rules{ip_version=\"4\",table=\"mangle\"}") + filterRulesMetric := tc.Felixes[0].PromMetric("felix_iptables_rules{ip_version=\"4\",table=\"filter\"}") + Eventually(mangleRulesMetric.Int, "5s").ShouldNot(BeZero(), "Metrics traffic was never allowed") + + collectMetrics := func() (mangleMetric int, filterMetric int) { + mangleMetric, err := mangleRulesMetric.Int() + Expect(err).NotTo(HaveOccurred()) + + filterMetric, err = filterRulesMetric.Int() + Expect(err).NotTo(HaveOccurred()) + + return mangleMetric, filterMetric + } + // Perform database changes in steps. 
+ type operation struct { + description string + do func() + } + operations := []operation{ + {"Creating a pre-DNAT GNP and a HEP", func() { + var err error + curMangleRulesMetric, err := mangleRulesMetric.Int() + Expect(err).NotTo(HaveOccurred()) + + appliedGNP, err = client.GlobalNetworkPolicies().Create(utils.Ctx, policy, utils.NoOptions) + Expect(err).NotTo(HaveOccurred(), "Couldn't create pre-DNAT GNP") + + _, err = client.HostEndpoints().Create(utils.Ctx, hep, utils.NoOptions) + Expect(err).NotTo(HaveOccurred(), "Failed to create HEP") + + Eventually(mangleRulesMetric.Int, "5s").ShouldNot(BeEquivalentTo(curMangleRulesMetric), "Mangle rules metric never changed following change of GNP") + }}, + {"Switching GNP to preDNAT: false", func() { + curMangleRulesMetric, err := mangleRulesMetric.Int() + Expect(err).NotTo(HaveOccurred()) + + appliedGNP.Spec.PreDNAT = false + _, err = client.GlobalNetworkPolicies().Update(utils.Ctx, appliedGNP, utils.NoOptions) + Expect(err).NotTo(HaveOccurred(), "Couldn't update GNP from pre-DNAT=true to pre-DNAT=false") + + // Wait for the change to take effect. 
+ Eventually(mangleRulesMetric.Int, "5s").ShouldNot(BeNumerically(">", curMangleRulesMetric), "Mangle rules metric never changed following change of GNP") + }}, + {"Deleting GNP", func() { + curFilterRulesMetric, err := filterRulesMetric.Int() + Expect(err).NotTo(HaveOccurred()) + + // Metrics port should still be open thanks to another GNP created in the parent BeforeEach + _, err = client.GlobalNetworkPolicies().Delete(utils.Ctx, appliedGNP.Name, options.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Eventually(filterRulesMetric.Int, "5s").ShouldNot(BeEquivalentTo(curFilterRulesMetric), "Filter rules metric never changed following deletion of GNP") + }}, + {"Deleting HEP", func() { + curFilterRulesMetric, err := filterRulesMetric.Int() + Expect(err).NotTo(HaveOccurred()) + + _, err = client.HostEndpoints().Delete(utils.Ctx, "t0", options.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Eventually(filterRulesMetric.Int, "5s").ShouldNot(BeEquivalentTo(curFilterRulesMetric), "Filter rules metric never changed following deletion of HEP") + }}, + } + + // Measure metrics and IPTables output and ensure + // they change proportionally to one-another. 
+ baselineMangleMetric, baselineFilterMetric := collectMetrics() + baselineMangleTableIptablesSave := tc.Felixes[0].AllCalicoIPTablesRules("mangle") + baselineFilterTableIptablesSave := tc.Felixes[0].AllCalicoIPTablesRules("filter") + checkMangleMetricDeltaMatchesIptablesDelta := func() error { + mangleIptablesSave := tc.Felixes[0].AllCalicoIPTablesRules("mangle") + iptablesDelta := len(mangleIptablesSave) - len(baselineMangleTableIptablesSave) + + mangleMetric, err := mangleRulesMetric.Int() + if err != nil { + return err + } + + metricDelta := mangleMetric - baselineMangleMetric + if iptablesDelta != metricDelta { + return fmt.Errorf("Mangle metric delta (%d) did not match IPTables delta (%d)", metricDelta, iptablesDelta) + } + + return nil + } + checkFilterMetricDeltaMatchesIptablesDelta := func() error { + filterIptablesSave := tc.Felixes[0].AllCalicoIPTablesRules("filter") + iptablesDelta := len(filterIptablesSave) - len(baselineFilterTableIptablesSave) + + filterMetric, err := filterRulesMetric.Int() + if err != nil { + return err + } + + metricDelta := filterMetric - baselineFilterMetric + if iptablesDelta != metricDelta { + return fmt.Errorf("Filter metric delta (%d) did not match IPTables delta (%d)", metricDelta, iptablesDelta) + } + + return nil + } + + for _, operation := range operations { + By(operation.description) + + operation.do() + + Eventually(checkMangleMetricDeltaMatchesIptablesDelta).ShouldNot(HaveOccurred(), fmt.Sprintf("Mangle metric delta did not match iptables delta. During operation: %s", operation.description)) + Eventually(checkFilterMetricDeltaMatchesIptablesDelta).ShouldNot(HaveOccurred(), fmt.Sprintf("Filter metric delta did not match iptables delta. 
During operation: %s", operation.description)) + } + + Expect(mangleRulesMetric.Int()).To(Equal(baselineMangleMetric)) + Expect(filterRulesMetric.Int()).To(Equal(baselineFilterMetric)) + }) }) }) diff --git a/felix/iptables/table.go b/felix/iptables/table.go index 94dabefc4fa..79c302a04e0 100644 --- a/felix/iptables/table.go +++ b/felix/iptables/table.go @@ -492,6 +492,7 @@ func (t *Table) InsertOrAppendRules(chainName string, rules []generictables.Rule t.chainToInsertedRules[chainName] = rules numRulesDelta := len(rules) - len(oldRules) t.gaugeNumRules.Add(float64(numRulesDelta)) + t.logCxt.WithField("numRulesDelta", numRulesDelta).Debug("Added to gauge") t.dirtyInsertAppend.Add(chainName) // Incref any newly-referenced chains, then decref the old ones. By incrementing first we @@ -515,6 +516,7 @@ func (t *Table) AppendRules(chainName string, rules []generictables.Rule) { t.chainToAppendedRules[chainName] = rules numRulesDelta := len(rules) - len(oldRules) t.gaugeNumRules.Add(float64(numRulesDelta)) + t.logCxt.WithField("numRulesDelta", numRulesDelta).Debug("Added to gauge") t.dirtyInsertAppend.Add(chainName) // Incref any newly-referenced chains, then decref the old ones. By incrementing first we @@ -546,9 +548,10 @@ func (t *Table) UpdateChain(chain *generictables.Chain) { t.maybeDecrefReferredChains(chain.Name, oldChain.Rules) } t.chainNameToChain[chain.Name] = chain - numRulesDelta := len(chain.Rules) - oldNumRules - t.gaugeNumRules.Add(float64(numRulesDelta)) if t.chainIsReferenced(chain.Name) { + numRulesDelta := len(chain.Rules) - oldNumRules + t.gaugeNumRules.Add(float64(numRulesDelta)) + t.logCxt.WithField("numRulesDelta", numRulesDelta).Debug("Added to gauge") t.dirtyChains.Add(chain.Name) // Defensive: make sure we re-read the dataplane state before we make updates. 
While the @@ -568,10 +571,11 @@ func (t *Table) RemoveChains(chains []*generictables.Chain) { func (t *Table) RemoveChainByName(name string) { t.logCxt.WithField("chainName", name).Debug("Removing chain from available set.") if oldChain, known := t.chainNameToChain[name]; known { - t.gaugeNumRules.Sub(float64(len(oldChain.Rules))) t.maybeDecrefReferredChains(name, oldChain.Rules) delete(t.chainNameToChain, name) if t.chainIsReferenced(name) { + t.gaugeNumRules.Sub(float64(len(oldChain.Rules))) + t.logCxt.WithField("len_rules", len(oldChain.Rules)).Debug("Subtracted from gauge") t.dirtyChains.Add(name) // Defensive: make sure we re-read the dataplane state before we make updates. While the @@ -624,6 +628,8 @@ func (t *Table) increfChain(chainName string) { t.updateRateLimitedLog.WithField("chainName", chainName).Info("Chain became referenced, marking it for programming") t.dirtyChains.Add(chainName) if chain := t.chainNameToChain[chainName]; chain != nil { + t.gaugeNumRules.Add(float64(len(t.chainNameToChain[chainName].Rules))) + t.logCxt.WithField("len_rules", len(t.chainNameToChain[chainName].Rules)).Debug("Added to gauge") // Recursively incref chains that this chain refers to. If // chain == nil then the chain is likely about to be added, in // which case we'll handle this whe the chain is added. @@ -638,7 +644,10 @@ func (t *Table) decrefChain(chainName string) { log.WithField("chainName", chainName).Debug("Decref chain") if t.chainRefCounts[chainName] == 1 { t.updateRateLimitedLog.WithField("chainName", chainName).Info("Chain no longer referenced, marking it for removal") + if chain := t.chainNameToChain[chainName]; chain != nil { + t.gaugeNumRules.Sub(float64(len(t.chainNameToChain[chainName].Rules))) + t.logCxt.WithField("len_rules", len(t.chainNameToChain[chainName].Rules)).Debug("Subtracted from gauge") // Recursively decref chains that this chain refers to. 
If // chain == nil then the chain has probably already been deleted // in which case we'll already have done the decrefs. From dfd8b239ae0da1ca43b2ae62200c0fdc10e27618 Mon Sep 17 00:00:00 2001 From: Jiawei Huang Date: Mon, 25 Nov 2024 08:32:26 -0800 Subject: [PATCH 07/11] Update sigs.k8s.io/knftables to v0.0.18 (#9523) Update sigs.k8s.io/knftables to the latest release that includes the changes we need from [1] so we don't need to replace. [1] https://github.com/kubernetes-sigs/knftables/pull/12 --- go.mod | 5 +---- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 02d22f61d41..60cbd936d0b 100644 --- a/go.mod +++ b/go.mod @@ -110,7 +110,7 @@ require ( modernc.org/memory v1.7.2 sigs.k8s.io/controller-runtime v0.17.0 sigs.k8s.io/kind v0.22.0 - sigs.k8s.io/knftables v0.0.15 + sigs.k8s.io/knftables v0.0.18 sigs.k8s.io/network-policy-api v0.1.5 sigs.k8s.io/yaml v1.4.0 ) @@ -329,7 +329,4 @@ replace ( k8s.io/mount-utils => k8s.io/mount-utils v0.30.5 k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.30.5 k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.30.5 - - // Use an untagged knftables version that has changes we need. 
- sigs.k8s.io/knftables => sigs.k8s.io/knftables v0.0.17-0.20240627140917-8d2660d78107 ) diff --git a/go.sum b/go.sum index 499cea42d59..305e2ab845e 100644 --- a/go.sum +++ b/go.sum @@ -1329,8 +1329,8 @@ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMm sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kind v0.22.0 h1:z/+yr/azoOfzsfooqRsPw1wjJlqT/ukXP0ShkHwNlsI= sigs.k8s.io/kind v0.22.0/go.mod h1:aBlbxg08cauDgZ612shr017/rZwqd7AS563FvpWKPVs= -sigs.k8s.io/knftables v0.0.17-0.20240627140917-8d2660d78107 h1:8t9LaiWa6YJkc3YtCGzLIZXKGfZrWB4/NGcQEV+GIHU= -sigs.k8s.io/knftables v0.0.17-0.20240627140917-8d2660d78107/go.mod h1:f/5ZLKYEUPUhVjUCg6l80ACdL7CIIyeL0DxfgojGRTk= +sigs.k8s.io/knftables v0.0.18 h1:6Duvmu0s/HwGifKrtl6G3AyAPYlWiZqTgS8bkVMiyaE= +sigs.k8s.io/knftables v0.0.18/go.mod h1:f/5ZLKYEUPUhVjUCg6l80ACdL7CIIyeL0DxfgojGRTk= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= From 7acfd45f8d15f5eed38e5afbad007fe67ac846ee Mon Sep 17 00:00:00 2001 From: "tuti." 
Date: Mon, 25 Nov 2024 08:32:52 -0800 Subject: [PATCH 08/11] [release tool] Update to allowing building and publishing images in hashrelease (#9429) * update cleaning old hashreleases * Allow building and publishing images in hashrelease * fix building images for hashrelease - key-cert-provisioner: add release-build and release-publish targets - add ability to skip release notes * address review feedback * fix publishing images for hashrelease - add custom registry to pinnedversion - allowing skipping validation for publish - fail if hashrelease latest is set while using custom registry - move hashrelease validation to calico manager - update .gitignore to ignore publishing identifier files - fix key-cert-provisioner publishing * update publish prereqs - check hashrelease server config if publishHashrelease == true - move release-verify from Makefile to golang tool * merge format fix * utilize operator optimization * remove Registry in pinnedversion * release notes uses projectcalico org * update hashrelease pipeline * address review comments --- .gitignore | 1 + .semaphore/release/hashrelease.yml | 4 +- apiserver/Makefile | 10 - app-policy/Makefile | 14 +- calicoctl/Makefile | 10 - cni-plugin/Makefile | 26 +- key-cert-provisioner/Makefile | 3 + kube-controllers/Makefile | 28 -- node/Makefile | 7 +- release/build/main.go | 112 +++-- release/internal/hashreleaseserver/config.go | 4 +- release/internal/hashreleaseserver/server.go | 40 +- .../internal/pinnedversion/pinnedversion.go | 79 +--- release/pkg/manager/calico/manager.go | 431 ++++++++++++++---- release/pkg/manager/calico/options.go | 52 ++- release/pkg/manager/operator/manager.go | 4 + release/pkg/manager/operator/options.go | 7 + release/pkg/tasks/hashrelease.go | 172 +------ typha/Makefile | 8 - 19 files changed, 531 insertions(+), 481 deletions(-) diff --git a/.gitignore b/.gitignore index 2ca02fdd851..46c700445d8 100644 --- a/.gitignore +++ b/.gitignore @@ -19,6 +19,7 @@ node/windows-packaging/nssm.exe 
_output builder.coverprofile *.log +.release-*.* /* Created by local kind cluster */ hack/test/kind/kind diff --git a/.semaphore/release/hashrelease.yml b/.semaphore/release/hashrelease.yml index 9c9f5782bb9..3bc6e5297ad 100644 --- a/.semaphore/release/hashrelease.yml +++ b/.semaphore/release/hashrelease.yml @@ -40,12 +40,10 @@ blocks: jobs: - name: Build and publish hashrelease commands: + - if [[ ${SEMAPHORE_PIPELINE_PROMOTION} == "true" ]]; then export BUILD_IMAGES=true; export SKIP_PUBLISH_IMAGES=false; fi - make hashrelease prologue: commands: - export GITHUB_TOKEN=${MARVIN_GITHUB_TOKEN} - cd release - make build - env_vars: - - name: IS_HASHRELEASE - value: "true" diff --git a/apiserver/Makefile b/apiserver/Makefile index 9cf9edafae2..a4fe4a28139 100644 --- a/apiserver/Makefile +++ b/apiserver/Makefile @@ -223,16 +223,6 @@ release-build: .release-$(VERSION).created $(MAKE) FIPS=true retag-build-images-with-registries IMAGETAG=latest-fips RELEASE=true LATEST_IMAGE_TAG=latest-fips touch $@ -## Verifies the release artifacts produces by `make release-build` are correct. -release-verify: release-prereqs - # Check the reported version is correct for each release artifact. - if ! docker run calico/apiserver | grep 'Version:\s*$(VERSION)$$'; then \ - echo "Reported version:" `docker run calico/apiserver` "\nExpected version: $(VERSION)"; \ - false; \ - else \ - echo "Version check passed\n"; \ - fi - ## Pushes a github release and release artifacts produced by `make release-build`. 
release-publish: release-prereqs .release-$(VERSION).published .release-$(VERSION).published: diff --git a/app-policy/Makefile b/app-policy/Makefile index 3b5cb5cd01e..e14a584ab95 100644 --- a/app-policy/Makefile +++ b/app-policy/Makefile @@ -75,7 +75,7 @@ build-all: $(VALIDARCHES) .PHONY: build ## Build the binary for the current architecture and platform -build: +build: $(MAKE) $(BINDIR)/dikastes-$(ARCH) ARCH=$(ARCH) $(MAKE) $(BINDIR)/healthz-$(ARCH) ARCH=$(ARCH) @@ -103,7 +103,7 @@ endif ############################################################################### -# Protobufs, +# Protobufs, # # 1. defer to felix's makefile for felixbackend stuff # 2. build proto for healthz @@ -202,16 +202,6 @@ release-build: .release-$(VERSION).created $(MAKE) FIPS=true retag-build-images-with-registries IMAGETAG=latest-fips RELEASE=true LATEST_IMAGE_TAG=latest-fips touch $@ -## Verifies the release artifacts produces by `make release-build` are correct. -release-verify: release-prereqs - # Check the reported version is correct for each release artifact. - if ! docker run $(DIKASTES_IMAGE):$(VERSION)-$(ARCH) /dikastes --version | grep '^$(VERSION)$$'; then \ - echo "Reported version:" `docker run $(DIKASTES_IMAGE):$(VERSION)-$(ARCH) /dikastes --version` "\nExpected version: $(VERSION)"; \ - false; \ - else \ - echo "Version check passed\n"; \ - fi - ## Pushes a github release and release artifacts produced by `make release-build`. release-publish: release-prereqs .release-$(VERSION).published .release-$(VERSION).published: diff --git a/calicoctl/Makefile b/calicoctl/Makefile index 68502a68d55..ac421cb16d1 100644 --- a/calicoctl/Makefile +++ b/calicoctl/Makefile @@ -260,16 +260,6 @@ release-build: .release-$(VERSION).created $(MAKE) retag-build-images-with-registries IMAGETAG=latest RELEASE=true touch $@ -## Verifies the release artifacts produces by `make release-build` are correct. 
-release-verify: release-prereqs - # Check the reported version is correct for each release artifact. - if ! docker run $(CALICOCTL_IMAGE):$(VERSION)-$(ARCH) version | grep 'Version:\s*$(VERSION)$$'; then \ - echo "Reported version:" `docker run $(CALICOCTL_IMAGE):$(VERSION)-$(ARCH) version` "\nExpected version: $(VERSION)"; \ - false; \ - else \ - echo "Version check passed\n"; \ - fi - ## Pushes a github release and release artifacts produced by `make release-build`. release-publish: release-prereqs .release-$(VERSION).published .release-$(VERSION).published: diff --git a/cni-plugin/Makefile b/cni-plugin/Makefile index ea1a342a7e8..33327f81079 100644 --- a/cni-plugin/Makefile +++ b/cni-plugin/Makefile @@ -284,29 +284,7 @@ release-build: .release-$(VERSION).created $(MAKE) FIPS=true retag-build-images-with-registries RELEASE=true IMAGETAG=latest-fips LATEST_IMAGE_TAG=latest-fips touch $@ -## Verifies the release artifacts produces by `make release-build` are correct. -release-verify: release-prereqs - # Check the reported version is correct for each release artifact. - $(MAKE) release-verify-version IMAGE=calico/cni:$(VERSION)-$(ARCH) - $(MAKE) release-verify-version IMAGE=calico/cni:$(VERSION)-fips-$(ARCH) - $(MAKE) release-verify-version IMAGE=quay.io/calico/cni:$(VERSION)-$(ARCH) - $(MAKE) release-verify-version IMAGE=quay.io/calico/cni:$(VERSION)-fips-$(ARCH) - # Check that the FIPS binaries have the correct symbols. - $(MAKE) release-verify-fips IMAGE=calico/cni:$(VERSION)-fips-$(ARCH) - $(MAKE) release-verify-fips IMAGE=quay.io/calico/cni:$(VERSION)-fips-$(ARCH) - -release-verify-version: - docker run --rm $(IMAGE) calico -v | grep -x $(VERSION) || ( echo "Reported version does not match" && exit 1 ) - docker run --rm $(IMAGE) calico-ipam -v | grep -x $(VERSION) || ( echo "Reported version does not match" && exit 1 ) - -release-verify-fips: - rm -rf .tmp && mkdir -p .tmp - # Copy binaries from the image so we can analyze them. 
- sh -c "docker create --name calico-cni-verify $(IMAGE); docker cp calico-cni-verify:/opt/cni/bin/install .tmp/calico; docker rm -f calico-cni-verify" - go tool nm .tmp/calico | grep '_Cfunc__goboringcrypto_' 1> /dev/null || echo "ERROR: Binary in image '$(IMAGE)' is missing expected goboring symbols" - rm -rf .tmp - -release-publish: release-prereqs release-verify .release-$(VERSION).published +release-publish: release-prereqs .release-$(VERSION).published .release-$(VERSION).published: $(MAKE) push-images-to-registries push-manifests IMAGETAG=$(VERSION) RELEASE=$(RELEASE) CONFIRM=$(CONFIRM) $(MAKE) FIPS=true push-images-to-registries push-manifests IMAGETAG=$(VERSION)-fips RELEASE=$(RELEASE) CONFIRM=$(CONFIRM) @@ -318,7 +296,7 @@ release-publish: release-prereqs release-verify .release-$(VERSION).published # WARNING: Only run this target if this release is the latest stable release. Do NOT # run this target for alpha / beta / release candidate builds, or patches to earlier Calico versions. ## Pushes `latest` release images. WARNING: Only run this for latest stable releases. -release-publish-latest: release-prereqs release-verify +release-publish-latest: release-prereqs # Check latest versions match. if ! docker run $(CNI_PLUGIN_IMAGE):latest-$(ARCH) calico -v | grep '^$(VERSION)$$'; then echo "Reported version:" `docker run $(CNI_PLUGIN_IMAGE):latest-$(ARCH) calico -v` "\nExpected version: $(VERSION)"; false; else echo "\nVersion check passed\n"; fi if ! 
docker run quay.io/$(CNI_PLUGIN_IMAGE):latest-$(ARCH) calico -v | grep '^$(VERSION)$$'; then echo "Reported version:" `docker run quay.io/$(CNI_PLUGIN_IMAGE):latest-$(ARCH) calico -v` "\nExpected version: $(VERSION)"; false; else echo "\nVersion check passed\n"; fi diff --git a/key-cert-provisioner/Makefile b/key-cert-provisioner/Makefile index b621b031bec..e3110d06ec9 100644 --- a/key-cert-provisioner/Makefile +++ b/key-cert-provisioner/Makefile @@ -30,6 +30,9 @@ KEY_CERT_PROVISIONER_CREATED=.key-cert-provisioner.created-$(ARCH)-fips VALIDARCHES=amd64 BINDIR=bin/$(ARCH)-fips LATEST_TAG=latest-fips +BUILD_IMAGES=$(KEY_CERT_PROVISIONER_IMAGE) +PUSH_IMAGES= $(foreach registry,$(DEV_REGISTRIES),$(addprefix $(registry)/,$(BUILD_IMAGES))) +RELEASE_IMAGES= $(foreach registry,$(RELEASE_REGISTRIES),$(addprefix $(registry)/,$(BUILD_IMAGES))) else KEY_CERT_PROVISIONER_CREATED=.key-cert-provisioner.created-$(ARCH) BINDIR=bin diff --git a/kube-controllers/Makefile b/kube-controllers/Makefile index 0cc9388f91f..4e76b7eb67e 100644 --- a/kube-controllers/Makefile +++ b/kube-controllers/Makefile @@ -179,33 +179,6 @@ release-build: .release-$(VERSION).created $(MAKE) FIPS=true retag-build-images-with-registries RELEASE=true IMAGETAG=latest-fips LATEST_IMAGE_TAG=latest-fips touch $@ -## Verifies the release artifacts produces by `make release-build` are correct. -release-verify: release-prereqs - # Check the reported version is correct for each release artifact. - if ! docker run $(KUBE_CONTROLLERS_IMAGE):$(VERSION)-$(ARCH) --version | grep '^$(VERSION)$$'; then echo "Reported version:" `docker run $(KUBE_CONTROLLERS_IMAGE):$(VERSION)-$(ARCH) --version` "\nExpected version: $(VERSION)"; false; else echo "\nVersion check passed\n"; fi - if ! 
docker run quay.io/$(KUBE_CONTROLLERS_IMAGE):$(VERSION)-$(ARCH) --version | grep '^$(VERSION)$$'; then echo "Reported version:" `docker run quay.io/$(KUBE_CONTROLLERS_IMAGE):$(VERSION)-$(ARCH) --version` "\nExpected version: $(VERSION)"; false; else echo "\nVersion check passed\n"; fi - -## Pushes a github release and release artifacts produced by `make release-build`. -release-publish: release-prereqs .release-$(VERSION).published -.release-$(VERSION).published: - $(MAKE) push-images-to-registries push-manifests IMAGETAG=$(VERSION) RELEASE=$(RELEASE) CONFIRM=$(CONFIRM) - $(MAKE) FIPS=true push-images-to-registries push-manifests IMAGETAG=$(VERSION)-fips RELEASE=$(RELEASE) CONFIRM=$(CONFIRM) - touch $@ - - -# WARNING: Only run this target if this release is the latest stable release. Do NOT -# run this target for alpha / beta / release candidate builds, or patches to earlier Calico versions. -## Pushes `latest` release images. WARNING: Only run this for latest stable releases. -release-publish-latest: release-prereqs - # Check latest versions match. - touch $@ - -## Verifies the release artifacts produces by `make release-build` are correct. -release-verify: release-prereqs - # Check the reported version is correct for each release artifact. - if ! docker run $(KUBE_CONTROLLERS_IMAGE):$(VERSION)-$(ARCH) --version | grep '^$(VERSION)$$'; then echo "Reported version:" `docker run $(KUBE_CONTROLLERS_IMAGE):$(VERSION)-$(ARCH) --version` "\nExpected version: $(VERSION)"; false; else echo "\nVersion check passed\n"; fi - if ! docker run quay.io/$(KUBE_CONTROLLERS_IMAGE):$(VERSION)-$(ARCH) --version | grep '^$(VERSION)$$'; then echo "Reported version:" `docker run quay.io/$(KUBE_CONTROLLERS_IMAGE):$(VERSION)-$(ARCH) --version` "\nExpected version: $(VERSION)"; false; else echo "\nVersion check passed\n"; fi - ## Pushes a github release and release artifacts produced by `make release-build`. 
release-publish: release-prereqs .release-$(VERSION).published .release-$(VERSION).published: @@ -213,7 +186,6 @@ release-publish: release-prereqs .release-$(VERSION).published $(MAKE) FIPS=true push-images-to-registries push-manifests IMAGETAG=$(VERSION)-fips RELEASE=$(RELEASE) CONFIRM=$(CONFIRM) touch $@ - # WARNING: Only run this target if this release is the latest stable release. Do NOT # run this target for alpha / beta / release candidate builds, or patches to earlier Calico versions. ## Pushes `latest` release images. WARNING: Only run this for latest stable releases. diff --git a/node/Makefile b/node/Makefile index 4193039a737..ea9bf587022 100644 --- a/node/Makefile +++ b/node/Makefile @@ -475,11 +475,6 @@ release-build: .release-$(VERSION).created release-windows-archive: release-prereqs $(MAKE) build-windows-archive WINDOWS_ARCHIVE_TAG=$(VERSION) -## Verifies the release artifacts produces by `make release-build` are correct. -release-verify: release-prereqs - # Check the reported version is correct for each release artifact. - if ! docker run $(NODE_IMAGE):$(VERSION)-$(ARCH) versions | grep '^$(VERSION)$$'; then echo "Reported version:" `docker run $(NODE_IMAGE):$(VERSION)-$(ARCH) versions` "\nExpected version: $(VERSION)"; false; else echo "\nVersion check passed\n"; fi - ## Pushes a github release and release artifacts produced by `make release-build`. release-publish: release-prereqs .release-$(VERSION).published .release-$(VERSION).published: @@ -495,7 +490,7 @@ release-publish: release-prereqs .release-$(VERSION).published # WARNING: Only run this target if this release is the latest stable release. Do NOT # run this target for alpha / beta / release candidate builds, or patches to earlier Calico versions. ## Pushes `latest` release images. WARNING: Only run this for latest stable releases. 
-release-publish-latest: release-verify +release-publish-latest: $(MAKE) push-images-to-registries push-manifests IMAGETAG=latest RELEASE=$(RELEASE) CONFIRM=$(CONFIRM) # Push Windows images. $(MAKE) release-windows IMAGETAG=latest CONFIRM=$(CONFIRM) diff --git a/release/build/main.go b/release/build/main.go index a3a8777bd75..7dabbdf687e 100644 --- a/release/build/main.go +++ b/release/build/main.go @@ -25,6 +25,7 @@ import ( "gopkg.in/natefinch/lumberjack.v2" "github.com/projectcalico/calico/release/internal/config" + "github.com/projectcalico/calico/release/internal/hashreleaseserver" "github.com/projectcalico/calico/release/internal/outputs" "github.com/projectcalico/calico/release/internal/pinnedversion" "github.com/projectcalico/calico/release/internal/registry" @@ -58,9 +59,10 @@ const ( newBranchFlag = "new-branch-version" // Configuration flags for the release publish command. - skipPublishImagesFlag = "skip-publish-images" - skipPublishGitTag = "skip-publish-git-tag" - skipPublishGithubRelease = "skip-publish-github-release" + skipPublishImagesFlag = "skip-publish-images" + skipPublishGitTagFlag = "skip-publish-git-tag" + skipPublishGithubReleaseFlag = "skip-publish-github-release" + skipPublishHashreleaseFlag = "skip-publish-hashrelease-server" ) var ( @@ -159,8 +161,8 @@ func hashreleaseSubCommands(cfg *config.Config) []*cli.Command { &cli.StringFlag{Name: repoFlag, Usage: "Git repository", EnvVars: []string{"GIT_REPO"}, Value: config.DefaultRepo}, &cli.BoolFlag{Name: skipValidationFlag, Usage: "Skip all pre-build validation", Value: false}, &cli.BoolFlag{Name: skipBranchCheckFlag, Usage: "Skip check that this is a valid release branch.", Value: false}, - &cli.BoolFlag{Name: buildImagesFlag, Usage: "Build images from local codebase. 
If false, will use images from CI instead.", Value: false}, - &cli.StringFlag{Name: imageRegistryFlag, Usage: "Specify image registry to use", Value: ""}, + &cli.BoolFlag{Name: buildImagesFlag, Usage: "Build images from local codebase. If false, will use images from CI instead.", EnvVars: []string{"BUILD_IMAGES"}, Value: false}, + &cli.StringSliceFlag{Name: imageRegistryFlag, Usage: "Specify image registry or registries to use", EnvVars: []string{"REGISTRIES"}, Value: &cli.StringSlice{}}, &cli.StringFlag{Name: operatorOrgFlag, Usage: "Operator git organization", EnvVars: []string{"OPERATOR_GIT_ORGANIZATION"}, Value: config.OperatorDefaultOrg}, &cli.StringFlag{Name: operatorRepoFlag, Usage: "Operator git repository", EnvVars: []string{"OPERATOR_GIT_REPO"}, Value: config.OperatorDefaultRepo}, &cli.StringFlag{Name: operatorImageFlag, Usage: "Specify the operator image to use", EnvVars: []string{"OPERATOR_IMAGE"}, Value: config.OperatorDefaultImage}, @@ -171,7 +173,7 @@ func hashreleaseSubCommands(cfg *config.Config) []*cli.Command { if c.Bool(skipValidationFlag) && !c.Bool(skipBranchCheckFlag) { return fmt.Errorf("%s must be set if %s is set", skipBranchCheckFlag, skipValidationFlag) } - if c.String(imageRegistryFlag) != "" && c.String(operatorRegistryFlag) == "" { + if len(c.StringSlice(imageRegistryFlag)) > 0 && c.String(operatorRegistryFlag) == "" { return fmt.Errorf("%s must be set if %s is set", operatorRegistryFlag, imageRegistryFlag) } if c.String(operatorImageFlag) != "" && c.String(operatorRegistryFlag) == "" { @@ -180,7 +182,7 @@ func hashreleaseSubCommands(cfg *config.Config) []*cli.Command { return fmt.Errorf("%s must be set if %s is set", operatorImageFlag, operatorRegistryFlag) } if !cfg.CI.IsCI { - if c.String(imageRegistryFlag) == "" && c.Bool(buildImagesFlag) { + if len(c.StringSlice(imageRegistryFlag)) == 0 && c.Bool(buildImagesFlag) { logrus.Warn("Local builds should specify an image registry using the --dev-registry flag") } if 
c.String(operatorRegistryFlag) == registry.QuayRegistry && c.String(operatorImageFlag) == config.OperatorDefaultImage { @@ -237,6 +239,7 @@ func hashreleaseSubCommands(cfg *config.Config) []*cli.Command { operator.WithValidate(!c.Bool(skipValidationFlag)), operator.WithReleaseBranchValidation(!c.Bool(skipBranchCheckFlag)), operator.WithVersion(versions.OperatorVersion.FormattedString()), + operator.WithCalicoDirectory(cfg.RepoRootDir), } o := operator.NewManager(operatorOpts...) if err := o.Build(cfg.TmpFolderPath()); err != nil { @@ -259,8 +262,8 @@ func hashreleaseSubCommands(cfg *config.Config) []*cli.Command { calico.WithRepoRemote(cfg.GitRemote), calico.WithArchitectures(cfg.Arches), } - if reg := c.String(imageRegistryFlag); reg != "" { - opts = append(opts, calico.WithImageRegistries([]string{reg})) + if reg := c.StringSlice(imageRegistryFlag); len(reg) > 0 { + opts = append(opts, calico.WithImageRegistries(reg)) } r := calico.NewManager(opts...) @@ -268,13 +271,13 @@ func hashreleaseSubCommands(cfg *config.Config) []*cli.Command { return err } - // For real releases, release notes are generated prior to building the release. For hash releases, - // generate a set of release notes and add them to the hashrelease directory. + // For real releases, release notes are generated prior to building the release. + // For hash releases, generate a set of release notes and add them to the hashrelease directory. 
releaseVersion, err := version.DetermineReleaseVersion(versions.ProductVersion, cfg.DevTagSuffix) if err != nil { - return fmt.Errorf("Failed to determine release version: %v", err) + return fmt.Errorf("failed to determine release version: %v", err) } - if _, err := outputs.ReleaseNotes(c.String(orgFlag), cfg.GithubToken, cfg.RepoRootDir, filepath.Join(dir, releaseNotesDir), releaseVersion); err != nil { + if _, err := outputs.ReleaseNotes(config.DefaultOrg, cfg.GithubToken, cfg.RepoRootDir, filepath.Join(dir, releaseNotesDir), releaseVersion); err != nil { return err } @@ -288,6 +291,11 @@ func hashreleaseSubCommands(cfg *config.Config) []*cli.Command { Name: "publish", Usage: "Publish hashrelease from _output/ to hashrelease server", Flags: []cli.Flag{ + &cli.StringFlag{Name: orgFlag, Usage: "Git organization", EnvVars: []string{"ORGANIZATION"}, Value: config.DefaultOrg}, + &cli.StringFlag{Name: repoFlag, Usage: "Git repository", EnvVars: []string{"GIT_REPO"}, Value: config.DefaultRepo}, + &cli.StringSliceFlag{Name: imageRegistryFlag, Usage: "Specify image registry or registries to use", EnvVars: []string{"REGISTRIES"}, Value: &cli.StringSlice{}}, + &cli.BoolFlag{Name: skipPublishImagesFlag, Usage: "Skip publishing of container images to registry/registries", EnvVars: []string{"PUBLISH_IMAGES"}, Value: false}, + &cli.BoolFlag{Name: skipPublishHashreleaseFlag, Usage: "Skip publishing to hashrelease server", Value: false}, &cli.BoolFlag{Name: latestFlag, Usage: "Promote this release as the latest for this stream", Value: true}, &cli.BoolFlag{Name: skipValidationFlag, Usage: "Skip pre-build validation", Value: false}, &cli.BoolFlag{Name: skipImageScanFlag, Usage: "Skip sending images to image scan service.", Value: false}, @@ -295,23 +303,31 @@ func hashreleaseSubCommands(cfg *config.Config) []*cli.Command { Action: func(c *cli.Context) error { configureLogging("hashrelease-publish.log") + // If using a custom registry, do not set the hashrelease as latest + if 
len(c.StringSlice(imageRegistryFlag)) > 0 && c.Bool(latestFlag) { + return fmt.Errorf("cannot set hashrelease as latest when using a custom registry") + } + // If skipValidationFlag is set, then we will also skip the image scan. Ensure the user // has set the correct flags. if c.Bool(skipValidationFlag) && !c.Bool(skipImageScanFlag) { return fmt.Errorf("%s must be set if %s is set", skipImageScanFlag, skipValidationFlag) } - // Extract the version from pinned-version.yaml. - hash, err := pinnedversion.RetrievePinnedVersionHash(cfg.TmpFolderPath()) + // Extract the pinned version as a hashrelease. + hashrel, err := pinnedversion.LoadHashrelease(cfg.RepoRootDir, cfg.TmpFolderPath(), dir) if err != nil { return err } + if c.Bool(latestFlag) { + hashrel.Latest = true + } // Check if the hashrelease has already been published. - if published, err := tasks.HashreleasePublished(cfg, hash); err != nil { + if published, err := tasks.HashreleasePublished(cfg, hashrel.Hash); err != nil { return err } else if published { - return fmt.Errorf("hashrelease %s has already been published", hash) + return fmt.Errorf("%s hashrelease (%s) has already been published", hashrel.Name, hashrel.Hash) } // Push the operator hashrelease first before validaion @@ -325,10 +341,38 @@ func hashreleaseSubCommands(cfg *config.Config) []*cli.Command { if err := o.Publish(cfg.TmpFolderPath()); err != nil { return err } - if !c.Bool(skipValidationFlag) { - tasks.HashreleaseValidate(cfg, c.Bool(skipImageScanFlag)) + + opts := []calico.Option{ + calico.WithRepoRoot(cfg.RepoRootDir), + calico.IsHashRelease(), + calico.WithVersions(&version.Data{ + ProductVersion: version.New(hashrel.ProductVersion), + OperatorVersion: version.New(hashrel.OperatorVersion), + }), + calico.WithGithubOrg(c.String(orgFlag)), + calico.WithRepoName(c.String(repoFlag)), + calico.WithRepoRemote(cfg.GitRemote), + calico.WithValidate(!c.Bool(skipValidationFlag)), + calico.WithTmpDir(cfg.TmpFolderPath()), + 
calico.WithHashrelease(*hashrel, cfg.HashreleaseServerConfig), + calico.WithPublishImages(!c.Bool(skipPublishImagesFlag)), + calico.WithPublishHashrelease(!c.Bool(skipPublishHashreleaseFlag)), + calico.WithImageScanning(!c.Bool(skipImageScanFlag), cfg.ImageScannerConfig), + } + if reg := c.StringSlice(imageRegistryFlag); len(reg) > 0 { + opts = append(opts, calico.WithImageRegistries(reg)) + } + r := calico.NewManager(opts...) + if err := r.PublishRelease(); err != nil { + return err + } + + // Send a slack message to notify that the hashrelease has been published. + if !c.Bool(skipPublishHashreleaseFlag) { + if err := tasks.HashreleaseSlackMessage(cfg, hashrel); err != nil { + return err + } } - tasks.HashreleasePush(cfg, dir, c.Bool(latestFlag)) return nil }, }, @@ -340,8 +384,7 @@ func hashreleaseSubCommands(cfg *config.Config) []*cli.Command { Aliases: []string{"gc"}, Action: func(c *cli.Context) error { configureLogging("hashrelease-garbage-collect.log") - tasks.HashreleaseCleanRemote(cfg) - return nil + return hashreleaseserver.CleanOldHashreleases(&cfg.HashreleaseServerConfig) }, }, } @@ -383,8 +426,9 @@ func releaseSubCommands(cfg *config.Config) []*cli.Command { Flags: []cli.Flag{ &cli.StringFlag{Name: orgFlag, Usage: "Git organization", EnvVars: []string{"ORGANIZATION"}, Value: config.DefaultOrg}, &cli.StringFlag{Name: repoFlag, Usage: "Git repository", EnvVars: []string{"GIT_REPO"}, Value: config.DefaultRepo}, + &cli.BoolFlag{Name: buildImagesFlag, Usage: "Build images from local codebase. 
If false, will use images from CI instead.", EnvVars: []string{"BUILD_IMAGES"}, Value: true}, &cli.BoolFlag{Name: skipValidationFlag, Usage: "Skip pre-build validation", Value: false}, - &cli.StringFlag{Name: imageRegistryFlag, Usage: "Specify image registry to use", Value: ""}, + &cli.StringSliceFlag{Name: imageRegistryFlag, Usage: "Specify image registry or registries to use", EnvVars: []string{"REGISTRIES"}, Value: &cli.StringSlice{}}, }, Action: func(c *cli.Context) error { configureLogging("release-build.log") @@ -412,12 +456,13 @@ func releaseSubCommands(cfg *config.Config) []*cli.Command { calico.WithGithubOrg(c.String(orgFlag)), calico.WithRepoName(c.String(repoFlag)), calico.WithRepoRemote(cfg.GitRemote), + calico.WithBuildImages(c.Bool(buildImagesFlag)), } if c.Bool(skipValidationFlag) { opts = append(opts, calico.WithValidate(false)) } - if reg := c.String(imageRegistryFlag); reg != "" { - opts = append(opts, calico.WithImageRegistries([]string{reg})) + if reg := c.StringSlice(imageRegistryFlag); len(reg) > 0 { + opts = append(opts, calico.WithImageRegistries(reg)) } r := calico.NewManager(opts...) 
return r.Build() @@ -431,13 +476,14 @@ func releaseSubCommands(cfg *config.Config) []*cli.Command { Flags: []cli.Flag{ &cli.StringFlag{Name: orgFlag, Usage: "Git organization", EnvVars: []string{"ORGANIZATION"}, Value: config.DefaultOrg}, &cli.StringFlag{Name: repoFlag, Usage: "Git repository", EnvVars: []string{"GIT_REPO"}, Value: config.DefaultRepo}, - &cli.BoolFlag{Name: skipPublishImagesFlag, Usage: "Skip publishing of container images to registry", Value: false}, - &cli.BoolFlag{Name: skipPublishGitTag, Usage: "Skip publishing of tag to git repository", Value: false}, - &cli.BoolFlag{Name: skipPublishGithubRelease, Usage: "Skip publishing of release to Github", Value: false}, - &cli.StringFlag{Name: imageRegistryFlag, Usage: "Specify image registry to use", Value: ""}, + &cli.BoolFlag{Name: skipPublishImagesFlag, Usage: "Skip publishing of container images to registry", EnvVars: []string{"SKIP_PUBLISH_IMAGES"}, Value: false}, + &cli.BoolFlag{Name: skipPublishGitTagFlag, Usage: "Skip publishing of tag to git repository", Value: false}, + &cli.BoolFlag{Name: skipPublishGithubReleaseFlag, Usage: "Skip publishing of release to Github", Value: false}, + &cli.StringSliceFlag{Name: imageRegistryFlag, Usage: "Specify image registry or registries to use", EnvVars: []string{"REGISTRIES"}, Value: &cli.StringSlice{}}, }, Action: func(c *cli.Context) error { configureLogging("release-publish.log") + ver, operatorVer, err := version.VersionsFromManifests(cfg.RepoRootDir) if err != nil { return err @@ -449,13 +495,15 @@ func releaseSubCommands(cfg *config.Config) []*cli.Command { OperatorVersion: operatorVer, }), calico.WithOutputDir(filepath.Join(baseUploadDir, ver.FormattedString())), - calico.WithPublishOptions(!c.Bool(skipPublishImagesFlag), !c.Bool(skipPublishGitTag), !c.Bool(skipPublishGithubRelease)), calico.WithGithubOrg(c.String(orgFlag)), calico.WithRepoName(c.String(repoFlag)), calico.WithRepoRemote(cfg.GitRemote), + 
calico.WithPublishImages(!c.Bool(skipPublishImagesFlag)), + calico.WithPublishGitTag(!c.Bool(skipPublishGitTagFlag)), + calico.WithPublishGithubRelease(!c.Bool(skipPublishGithubReleaseFlag)), } - if reg := c.String(imageRegistryFlag); reg != "" { - opts = append(opts, calico.WithImageRegistries([]string{reg})) + if reg := c.StringSlice(imageRegistryFlag); len(reg) > 0 { + opts = append(opts, calico.WithImageRegistries(reg)) } r := calico.NewManager(opts...) return r.PublishRelease() diff --git a/release/internal/hashreleaseserver/config.go b/release/internal/hashreleaseserver/config.go index 28bf3da2167..d0f197af2da 100644 --- a/release/internal/hashreleaseserver/config.go +++ b/release/internal/hashreleaseserver/config.go @@ -38,8 +38,8 @@ type Config struct { KnownHosts string `envconfig:"DOCS_KNOWN_HOSTS"` } -// rshVars returns the ssh command for rsync to use for the connection -func (s *Config) rshVars() string { +// RSHCommand returns the ssh command for rsync to use for the connection +func (s *Config) RSHCommand() string { str := []string{"ssh", "-i", s.Key, "-p", s.Port, "-q", "-o StrictHostKeyChecking=yes"} if s.KnownHosts != "" { str = append(str, "-o UserKnownHostsFile="+s.KnownHosts) diff --git a/release/internal/hashreleaseserver/server.go b/release/internal/hashreleaseserver/server.go index 8e89c01a521..ece725d5dd4 100644 --- a/release/internal/hashreleaseserver/server.go +++ b/release/internal/hashreleaseserver/server.go @@ -25,8 +25,6 @@ import ( "time" "github.com/sirupsen/logrus" - - "github.com/projectcalico/calico/release/internal/command" ) const ( @@ -52,6 +50,12 @@ type Hashrelease struct { // Stream is the version the hashrelease is for (e.g master, v3.19) Stream string + // ProductVersion is the product version in the hashrelease + ProductVersion string + + // OperatorVersion is the operator version for the hashreleaseq + OperatorVersion string + // Source is the source of hashrelease content Source string @@ -62,11 +66,11 @@ type 
Hashrelease struct { Latest bool } -func (h Hashrelease) URL() string { +func (h *Hashrelease) URL() string { return fmt.Sprintf("https://%s.%s", h.Name, BaseDomain) } -func remoteDocsPath(user string) string { +func RemoteDocsPath(user string) string { path := "files" if user != "root" { path = filepath.Join("home", "core", "disk", "docs-preview", path) @@ -75,7 +79,7 @@ func remoteDocsPath(user string) string { } func remoteReleasesLibraryPath(user string) string { - return filepath.Join(remoteDocsPath(user), "all-releases") + return filepath.Join(RemoteDocsPath(user), "all-releases") } func HasHashrelease(hash string, cfg *Config) bool { @@ -86,25 +90,13 @@ func HasHashrelease(hash string, cfg *Config) bool { return false } -// PublishHashrelease publishes a hashrelease to the server -func PublishHashrelease(rel Hashrelease, cfg *Config) error { - logrus.WithFields(logrus.Fields{ - "hashrelease": rel.Name, - "hash": rel.Hash, - "source": rel.Source, - }).Debug("Publishing hashrelease") - dir := rel.Source + "/" - if _, err := command.Run("rsync", []string{"--stats", "-az", "--delete", fmt.Sprintf("--rsh=%s", cfg.rshVars()), dir, fmt.Sprintf("%s:%s/%s", cfg.HostString(), remoteDocsPath(cfg.User), rel.Name)}); err != nil { - logrus.WithError(err).Error("Failed to publish hashrelease") +// SetHashreleaseAsLatest sets the hashrelease as the latest for the stream +func SetHashreleaseAsLatest(rel Hashrelease, cfg *Config) error { + logrus.Debugf("Updating latest hashrelease for %s stream to %s", rel.Stream, rel.Name) + if _, err := runSSHCommand(cfg, fmt.Sprintf(`echo "%s/" > %s/latest-os/%s.txt && echo %s >> %s`, rel.URL(), RemoteDocsPath(cfg.User), rel.Stream, rel.Name, remoteReleasesLibraryPath(cfg.User))); err != nil { + logrus.WithError(err).Error("Failed to update latest hashrelease and hashrelease library") return err } - if rel.Latest { - logrus.Debugf("Updating latest hashrelease for %s stream to %s", rel.Stream, rel.Name) - if _, err := runSSHCommand(cfg, 
fmt.Sprintf(`echo "%s/" > %s/latest-os/%s.txt && echo %s >> %s`, rel.URL(), remoteDocsPath(cfg.User), rel.Stream, rel.Name, remoteReleasesLibraryPath(cfg.User))); err != nil { - logrus.WithError(err).Error("Failed to update latest hashrelease and hashrelease library") - return err - } - } return nil } @@ -136,7 +128,7 @@ func CleanOldHashreleases(cfg *Config) error { } func listHashreleases(cfg *Config) ([]Hashrelease, error) { - cmd := fmt.Sprintf("ls -lt --time-style=+'%%Y-%%m-%%d %%H:%%M:%%S' %s", remoteDocsPath(cfg.User)) + cmd := fmt.Sprintf("ls -lt --time-style=+'%%Y-%%m-%%d %%H:%%M:%%S' %s", RemoteDocsPath(cfg.User)) out, err := runSSHCommand(cfg, cmd) if err != nil { logrus.WithError(err).Error("Failed to get list of hashreleases") @@ -162,7 +154,7 @@ func listHashreleases(cfg *Config) ([]Hashrelease, error) { } if re.MatchString(name) { releases = append(releases, Hashrelease{ - Name: filepath.Join(remoteDocsPath(cfg.User), name), + Name: filepath.Join(RemoteDocsPath(cfg.User), name), Time: time, }) } diff --git a/release/internal/pinnedversion/pinnedversion.go b/release/internal/pinnedversion/pinnedversion.go index c742a6e0f04..ae62fa42a83 100644 --- a/release/internal/pinnedversion/pinnedversion.go +++ b/release/internal/pinnedversion/pinnedversion.go @@ -197,63 +197,6 @@ func RetrievePinnedOperator(outputDir string) (registry.OperatorComponent, error }, nil } -// RetrievePinnedOperatorVersion retrieves the operator version from the pinned version file. -func RetrievePinnedOperatorVersion(outputDir string) (string, error) { - operator, err := RetrievePinnedOperator(outputDir) - if err != nil { - return "", err - } - return operator.Version, nil -} - -// RetrieveReleaseName retrieves the release name from the pinned version file. 
-func RetrieveReleaseName(outputDir string) (string, error) { - pinnedVersionPath := pinnedVersionFilePath(outputDir) - var pinnedversion PinnedVersionFile - if pinnedVersionData, err := os.ReadFile(pinnedVersionPath); err != nil { - return "", err - } else if err := yaml.Unmarshal([]byte(pinnedVersionData), &pinnedversion); err != nil { - return "", err - } - return pinnedversion[0].ReleaseName, nil -} - -// RetrievePinnedProductVersion retrieves the product version from the pinned version file. -func RetrievePinnedProductVersion(outputDir string) (string, error) { - pinnedVersionPath := pinnedVersionFilePath(outputDir) - var pinnedversion PinnedVersionFile - if pinnedVersionData, err := os.ReadFile(pinnedVersionPath); err != nil { - return "", err - } else if err := yaml.Unmarshal([]byte(pinnedVersionData), &pinnedversion); err != nil { - return "", err - } - return pinnedversion[0].Title, nil -} - -// RetrievePinnedVersionNote retrieves the note from the pinned version file. -func RetrievePinnedVersionNote(outputDir string) (string, error) { - pinnedVersionPath := pinnedVersionFilePath(outputDir) - var pinnedversion PinnedVersionFile - if pinnedVersionData, err := os.ReadFile(pinnedVersionPath); err != nil { - return "", err - } else if err := yaml.Unmarshal([]byte(pinnedVersionData), &pinnedversion); err != nil { - return "", err - } - return pinnedversion[0].Note, nil -} - -// RetrievePinnedVersionHash retrieves the hash from the pinned version file. -func RetrievePinnedVersionHash(outputDir string) (string, error) { - pinnedVersionPath := pinnedVersionFilePath(outputDir) - var pinnedversion PinnedVersionFile - if pinnedVersionData, err := os.ReadFile(pinnedVersionPath); err != nil { - return "", err - } else if err := yaml.Unmarshal([]byte(pinnedVersionData), &pinnedversion); err != nil { - return "", err - } - return pinnedversion[0].Hash, nil -} - // RetrieveComponentsToValidate retrieves the components to validate from the pinned version file. 
func RetrieveComponentsToValidate(outputDir string) (map[string]registry.Component, error) { pinnedVersionPath := pinnedVersionFilePath(outputDir) @@ -284,3 +227,25 @@ func RetrieveComponentsToValidate(outputDir string) (map[string]registry.Compone } return components, nil } + +func LoadHashrelease(repoRootDir, tmpDir, srcDir string) (*hashreleaseserver.Hashrelease, error) { + productBranch, err := utils.GitBranch(repoRootDir) + if err != nil { + logrus.WithError(err).Errorf("Failed to get %s branch name", utils.ProductName) + return nil, err + } + pinnedVersion, err := RetrievePinnedVersion(tmpDir) + if err != nil { + logrus.WithError(err).Fatal("Failed to get pinned version") + } + return &hashreleaseserver.Hashrelease{ + Name: pinnedVersion.ReleaseName, + Hash: pinnedVersion.Hash, + Note: pinnedVersion.Note, + Stream: version.DeterminePublishStream(productBranch, pinnedVersion.Title), + ProductVersion: pinnedVersion.Title, + OperatorVersion: pinnedVersion.TigeraOperator.Version, + Source: srcDir, + Time: time.Now(), + }, nil +} diff --git a/release/pkg/manager/calico/manager.go b/release/pkg/manager/calico/manager.go index 5e5b12c51af..cc8db01864f 100644 --- a/release/pkg/manager/calico/manager.go +++ b/release/pkg/manager/calico/manager.go @@ -19,6 +19,7 @@ import ( "fmt" "os" "path/filepath" + "reflect" "regexp" "strings" @@ -27,6 +28,10 @@ import ( "gopkg.in/yaml.v2" "github.com/projectcalico/calico/release/internal/command" + "github.com/projectcalico/calico/release/internal/hashreleaseserver" + "github.com/projectcalico/calico/release/internal/imagescanner" + "github.com/projectcalico/calico/release/internal/pinnedversion" + "github.com/projectcalico/calico/release/internal/registry" "github.com/projectcalico/calico/release/internal/utils" ) @@ -41,6 +46,48 @@ var ( "asia.gcr.io/projectcalico-org", "us.gcr.io/projectcalico-org", } + + // Directories that publish images. 
+ imageReleaseDirs = []string{ + "apiserver", + "app-policy", + "calicoctl", + "cni-plugin", + "key-cert-provisioner", + "kube-controllers", + "node", + "pod2daemon", + "typha", + } + + // Directories for Windows. + windowsReleaseDirs = []string{ + "node", + "cni-plugin", + } + + // images that should be expected for a release. + // This list needs to be kept up-to-date + // with the actual release artifacts produced for a release + // as images are added or removed. + images = []string{ + "calico/apiserver", + "calico/cni", + "calico/csi", + "calico/ctl", + "calico/dikastes", + "calico/key-cert-provisioner", + "calico/kube-controllers", + "calico/node", + "calico/node-driver-registrar", + "calico/pod2daemon-flexvol", + "calico/test-signer", + "calico/typha", + } + windowsImages = []string{ + "calico/cni-windows", + "calico/node-windows", + } ) func NewManager(opts ...Option) *CalicoManager { @@ -121,6 +168,9 @@ type CalicoManager struct { // which we should read them for publishing. outputDir string + // tmpDir is the directory to which we should write temporary files. + tmpDir string + // Fine-tuning configuration for publishing. publishImages bool publishTag bool @@ -144,28 +194,23 @@ type CalicoManager struct { // architectures is the list of architectures for which we should build images. // If empty, we build for all. architectures []string + + // hashrelease configuration. + publishHashrelease bool + hashrelease hashreleaseserver.Hashrelease + hashreleaseConfig hashreleaseserver.Config + + // image scanning configuration. + imageScanning bool + imageScanningConfig imagescanner.Config } -// releaseImages returns the set of images that should be expected for a release. -// This function needs to be kept up-to-date with the actual release artifacts produced for a -// release if images are added or removed. 
func releaseImages(version, operatorVersion string) []string { - return []string{ - fmt.Sprintf("quay.io/tigera/operator:%s", operatorVersion), - fmt.Sprintf("calico/typha:%s", version), - fmt.Sprintf("calico/ctl:%s", version), - fmt.Sprintf("calico/node:%s", version), - fmt.Sprintf("calico/cni:%s", version), - fmt.Sprintf("calico/apiserver:%s", version), - fmt.Sprintf("calico/kube-controllers:%s", version), - fmt.Sprintf("calico/dikastes:%s", version), - fmt.Sprintf("calico/pod2daemon-flexvol:%s", version), - fmt.Sprintf("calico/key-cert-provisioner:%s", version), - fmt.Sprintf("calico/csi:%s", version), - fmt.Sprintf("calico/node-driver-registrar:%s", version), - fmt.Sprintf("calico/cni-windows:%s", version), - fmt.Sprintf("calico/node-windows:%s", version), + imgList := []string{fmt.Sprintf("quay.io/tigera/operator:%s", operatorVersion)} + for _, img := range append(images, windowsImages...) { + imgList = append(imgList, fmt.Sprintf("%s:%s", img, version)) } + return imgList } func (r *CalicoManager) helmChartVersion() string { @@ -212,14 +257,8 @@ func (r *CalicoManager) Build() error { }() } - if r.buildImages { - // Build the container images for the release if configured to do so. - // - // If skipped, we expect that the images for this version have already - // been published as part of CI. - if err = r.BuildContainerImages(ver); err != nil { - return err - } + if err = r.buildContainerImages(); err != nil { + return err } // Build the helm chart. @@ -377,16 +416,6 @@ func (r *CalicoManager) TagRelease(ver string) error { return nil } -func (r *CalicoManager) BuildContainerImages(ver string) error { - // Build container images for the release. - if err := r.buildContainerImages(ver); err != nil { - return err - } - // TODO: Assert the produced images are OK. e.g., have correct - // commit and version information compiled in. 
- return nil -} - func (r *CalicoManager) BuildHelm() error { if r.isHashRelease { // We need to modify values.yaml to use the correct version. @@ -425,33 +454,53 @@ func (r *CalicoManager) buildOCPBundle() error { return nil } -func (r *CalicoManager) PublishRelease() error { - // Determine the currently checked-out tag. - ver, err := r.git("describe", "--exact-match", "--tags", "HEAD") - if err != nil { - return fmt.Errorf("failed to get tag for checked-out commit, is there one? %s", err) +func (r *CalicoManager) publishToHashreleaseServer() error { + if !r.publishHashrelease { + logrus.Info("Skipping publishing to hashrelease server") + return nil + } + logrus.WithField("note", r.hashrelease.Note).Info("Publishing hashrelease") + dir := r.hashrelease.Source + "/" + if _, err := r.runner.Run("rsync", + []string{ + "--stats", "-az", "--delete", + fmt.Sprintf("--rsh=%s", r.hashreleaseConfig.RSHCommand()), dir, + fmt.Sprintf("%s:%s/%s", r.hashreleaseConfig.HostString(), hashreleaseserver.RemoteDocsPath(r.hashreleaseConfig.User), r.hashrelease.Name), + }, nil); err != nil { + logrus.WithError(err).Error("Failed to publish hashrelease") + return err } + if r.hashrelease.Latest { + return hashreleaseserver.SetHashreleaseAsLatest(r.hashrelease, &r.hashreleaseConfig) + } + return nil +} +func (r *CalicoManager) PublishRelease() error { // Check that the environment has the necessary prereqs. - if err = r.publishPrereqs(); err != nil { + if err := r.publishPrereqs(); err != nil { return err } // Publish container images. - if err = r.publishContainerImages(ver); err != nil { + if err := r.publishContainerImages(); err != nil { return fmt.Errorf("failed to publish container images: %s", err) } - if r.publishTag { - // If all else is successful, push the git tag. 
- if _, err = r.git("push", r.remote, ver); err != nil { - return fmt.Errorf("failed to push git tag: %s", err) + if r.isHashRelease { + if err := r.publishToHashreleaseServer(); err != nil { + return fmt.Errorf("failed to publish hashrelease: %s", err) + } + } else { + // Publish the git tag. + if err := r.publishGitTag(); err != nil { + return fmt.Errorf("failed to publish git tag: %s", err) } - } - // Publish the release to github. - if err = r.publishGithubRelease(ver); err != nil { - return fmt.Errorf("failed to publish github release: %s", err) + // Publish the release to github. + if err := r.publishGithubRelease(); err != nil { + return fmt.Errorf("failed to publish github release: %s", err) + } } return nil @@ -462,22 +511,221 @@ func (r *CalicoManager) releasePrereqs() error { // Check that we're not on the master branch. We never cut releases from master. branch := r.determineBranch() if branch == "master" { - return fmt.Errorf("Cannot cut release from branch: %s", branch) + return fmt.Errorf("cannot cut release from branch: %s", branch) } // Make sure we have a github token - needed for publishing to GH. // Strictly only needed for publishing, but we check during release anyway so // that we don't get all the way through the build to find out we're missing it! if token := os.Getenv("GITHUB_TOKEN"); token == "" { - return fmt.Errorf("No GITHUB_TOKEN present in environment") + return fmt.Errorf("no GITHUB_TOKEN present in environment") } - // TODO: Make sure the environment isn't dirty. + // If we are releasing to projectcalico/calico, make sure we are releasing to the default registries. 
+ if r.githubOrg == "projectcalico" && r.repo == "calico" { + if !reflect.DeepEqual(r.imageRegistries, defaultRegistries) { + return fmt.Errorf("image registries cannot be different from default registries for a release") + } + } + + return r.assertImageVersions() +} + +type imageExistsResult struct { + name string + image string + exists bool + err error +} + +func imgExists(name string, component registry.Component, ch chan imageExistsResult) { + r := imageExistsResult{ + name: name, + image: component.String(), + } + r.exists, r.err = registry.ImageExists(component.ImageRef()) + ch <- r +} + +// Check that the environment has the necessary prereqs for publishing hashrelease +func (r *CalicoManager) hashreleasePrereqs() error { + if r.publishHashrelease { + if !r.hashreleaseConfig.Valid() { + return fmt.Errorf("missing hashrelease server configuration") + } + } + images, err := pinnedversion.RetrieveComponentsToValidate(r.tmpDir) + if err != nil { + return fmt.Errorf("failed to get components to validate: %s", err) + } + if r.publishImages { + return r.assertImageVersions() + } else { + results := make(map[string]imageExistsResult, len(images)) + ch := make(chan imageExistsResult) + for name, component := range images { + go imgExists(name, component, ch) + } + for range images { + res := <-ch + results[res.name] = res + } + failedImageList := []string{} + for name, r := range results { + logrus.WithFields(logrus.Fields{ + "image": r.image, + "exists": r.exists, + }).Info("Validating image") + if r.err != nil || !r.exists { + logrus.WithError(r.err).WithField("image", name).Error("Error checking image") + failedImageList = append(failedImageList, images[name].String()) + } else { + logrus.WithField("image", name).Info("Image exists") + } + } + failedCount := len(failedImageList) + if failedCount > 0 { + return fmt.Errorf("failed to validate %d images: %s", failedCount, strings.Join(failedImageList, ", ")) + } + } + if r.imageScanning { + logrus.Info("Sending 
images to ISS") + imageList := []string{} + for _, component := range images { + imageList = append(imageList, component.String()) + } + imageScanner := imagescanner.New(r.imageScanningConfig) + err := imageScanner.Scan(imageList, r.hashrelease.Stream, false, r.tmpDir) + if err != nil { + // Error is logged and ignored as this is not considered a fatal error + logrus.WithError(err).Error("Failed to scan images") + } + } + return nil +} + +// Check that the images exists with the correct version. +func (r *CalicoManager) assertImageVersions() error { + for _, img := range images { + imageName := strings.TrimPrefix(img, "calico/") + switch img { + case "calico/apiserver": + for _, reg := range r.imageRegistries { + out, err := r.runner.Run("docker", []string{"run", "--rm", fmt.Sprintf("%s/%s:%s", reg, imageName, r.calicoVersion)}, nil) + // apiserver always returns an error because there is no kubeconfig, log and ignore it. + if err != nil { + logrus.WithError(err).WithField("image", img).Warn("error getting version from image") + } + if !strings.Contains(out, r.calicoVersion) { + return fmt.Errorf("version does not match for image %s/%s:%s", reg, imageName, r.calicoVersion) + } + } + case "calico/cni": + for _, reg := range r.imageRegistries { + for _, cmd := range []string{"calico", "calico-ipam"} { + out, err := r.runner.Run("docker", []string{"run", "--rm", fmt.Sprintf("%s/%s:%s", reg, imageName, r.calicoVersion), cmd, "-v"}, nil) + if err != nil { + return fmt.Errorf("failed to run get version from %s image: %s", cmd, err) + } else if !strings.Contains(out, r.calicoVersion) { + return fmt.Errorf("version does not match for image %s/%s:%s", reg, imageName, r.calicoVersion) + } + } + } + case "calico/csi": + for _, reg := range r.imageRegistries { + out, err := r.runner.Run("docker", []string{"inspect", `--format='{{ index .Config.Labels "version" }}'`, fmt.Sprintf("%s/%s:%s", reg, imageName, r.calicoVersion)}, nil) + if err != nil { + return fmt.Errorf("failed to 
run get version from %s image: %s", imageName, err) + } else if !strings.Contains(out, r.calicoVersion) { + return fmt.Errorf("version does not match for image %s/%s:%s", reg, imageName, r.calicoVersion) + } + } + case "calico/ctl": + for _, reg := range r.imageRegistries { + out, err := r.runner.Run("docker", []string{"run", "--rm", fmt.Sprintf("%s/%s:%s", reg, imageName, r.calicoVersion), "version"}, nil) + if err != nil { + return fmt.Errorf("failed to run get version from %s image: %s", imageName, err) + } else if !strings.Contains(out, r.calicoVersion) { + return fmt.Errorf("version does not match for image %s/%s:%s", reg, imageName, r.calicoVersion) + } + } + case "calico/dikastes": + for _, reg := range r.imageRegistries { + out, err := r.runner.Run("docker", []string{"inspect", `--format='{{ index .Config.Labels "version" }}'`, fmt.Sprintf("%s/%s:%s", reg, imageName, r.calicoVersion)}, nil) + if err != nil { + return fmt.Errorf("failed to run get version from %s image: %s", imageName, err) + } else if !strings.Contains(out, r.calicoVersion) { + return fmt.Errorf("version does not match for image %s/%s:%s", reg, imageName, r.calicoVersion) + } + } + case "calico/key-cert-provisioner": + // key-cert-provisioner does not have version information in the image. 
+ case "calico/kube-controllers": + for _, reg := range r.imageRegistries { + out, err := r.runner.Run("docker", []string{"run", "--rm", fmt.Sprintf("%s/%s:%s", reg, imageName, r.calicoVersion), "--version"}, nil) + if err != nil { + return fmt.Errorf("failed to run get version from %s image: %s", imageName, err) + } else if !strings.Contains(out, r.calicoVersion) { + return fmt.Errorf("version does not match for image %s/%s:%s", reg, imageName, r.calicoVersion) + } + } + case "calico/node": + for _, reg := range r.imageRegistries { + out, err := r.runner.Run("docker", []string{"run", "--rm", fmt.Sprintf("%s/%s:%s", reg, imageName, r.calicoVersion), "versions"}, nil) + if err != nil { + return fmt.Errorf("failed to run get version from %s image: %s", imageName, err) + } else if !strings.Contains(out, r.calicoVersion) { + return fmt.Errorf("version does not match for image %s/%s:%s", reg, imageName, r.calicoVersion) + } + } + case "calico/node-driver-registrar": + for _, reg := range r.imageRegistries { + out, err := r.runner.Run("docker", []string{"inspect", `--format='{{ index .Config.Labels "version" }}'`, fmt.Sprintf("%s/%s:%s", reg, imageName, r.calicoVersion)}, nil) + if err != nil { + return fmt.Errorf("failed to run get version from %s image: %s", imageName, err) + } else if !strings.Contains(out, r.calicoVersion) { + return fmt.Errorf("version does not match for image %s/%s:%s", reg, imageName, r.calicoVersion) + } + } + case "calico/pod2daemon-flexvol": + for _, reg := range r.imageRegistries { + out, err := r.runner.Run("docker", []string{"inspect", `--format='{{ index .Config.Labels "version" }}'`, fmt.Sprintf("%s/%s:%s", reg, imageName, r.calicoVersion)}, nil) + if err != nil { + return fmt.Errorf("failed to run get version from %s image: %s", imageName, err) + } else if !strings.Contains(out, r.calicoVersion) { + return fmt.Errorf("version does not match for image %s/%s:%s", reg, imageName, r.calicoVersion) + } + } + case "calico/test-signer": + // 
test-signer does not have version information in the image. + case "calico/typha": + for _, reg := range r.imageRegistries { + out, err := r.runner.Run("docker", []string{"run", "--rm", fmt.Sprintf("%s/%s:%s", reg, imageName, r.calicoVersion), "calico-typha", "--version"}, nil) + if err != nil { + return fmt.Errorf("failed to run get version from %s image: %s", imageName, err) + } else if !strings.Contains(out, r.calicoVersion) { + return fmt.Errorf("version does not match for image %s/%s:%s", reg, imageName, r.calicoVersion) + } + } + default: + return fmt.Errorf("unknown image: %s, update assertion to include validating image", img) + } + } return nil } // Prerequisites specific to publishing a release. func (r *CalicoManager) publishPrereqs() error { + if !r.validate { + logrus.Warn("Skipping pre-publish validation") + return nil + } + if dirty, err := utils.GitIsDirty(r.repoRoot); dirty || err != nil { + return fmt.Errorf("there are uncommitted changes in the repository, please commit or stash them before publishing the release") + } + if r.isHashRelease { + return r.hashreleasePrereqs() + } // TODO: Verify all required artifacts are present. 
return r.releasePrereqs() } @@ -612,7 +860,6 @@ func (r *CalicoManager) buildReleaseTar(ver string, targetDir string) error { fmt.Sprintf("%s/cni:%s", registry, ver): filepath.Join(imgDir, "calico-cni.tar"), fmt.Sprintf("%s/kube-controllers:%s", registry, ver): filepath.Join(imgDir, "calico-kube-controllers.tar"), fmt.Sprintf("%s/pod2daemon-flexvol:%s", registry, ver): filepath.Join(imgDir, "calico-pod2daemon.tar"), - fmt.Sprintf("%s/key-cert-provisioner:%s", registry, ver): filepath.Join(imgDir, "calico-key-cert-provisioner.tar"), fmt.Sprintf("%s/dikastes:%s", registry, ver): filepath.Join(imgDir, "calico-dikastes.tar"), fmt.Sprintf("%s/flannel-migration-controller:%s", registry, ver): filepath.Join(imgDir, "calico-flannel-migration-controller.tar"), } @@ -661,28 +908,18 @@ func (r *CalicoManager) buildReleaseTar(ver string, targetDir string) error { return nil } -func (r *CalicoManager) buildContainerImages(ver string) error { - releaseDirs := []string{ - "node", - "pod2daemon", - "key-cert-provisioner", - "cni-plugin", - "apiserver", - "kube-controllers", - "calicoctl", - "app-policy", - "typha", - "felix", +func (r *CalicoManager) buildContainerImages() error { + if !r.buildImages { + logrus.Info("Skip building container images") + return nil } + releaseDirs := append(imageReleaseDirs, "felix") - windowsReleaseDirs := []string{ - "node", - "cni-plugin", - } + logrus.Info("Building container images") // Build env. 
env := append(os.Environ(), - fmt.Sprintf("VERSION=%s", ver), + fmt.Sprintf("VERSION=%s", r.calicoVersion), fmt.Sprintf("DEV_REGISTRIES=%s", strings.Join(r.imageRegistries, " ")), ) @@ -711,7 +948,16 @@ func (r *CalicoManager) buildContainerImages(ver string) error { return nil } -func (r *CalicoManager) publishGithubRelease(ver string) error { +func (r *CalicoManager) publishGitTag() error { + if !r.publishTag { + logrus.Info("Skipping git tag") + return nil + } + _, err := r.git("push", r.remote, r.calicoVersion) + return err +} + +func (r *CalicoManager) publishGithubRelease() error { if !r.publishGithub { logrus.Info("Skipping github release") return nil @@ -732,19 +978,19 @@ Additional links: - [VPP data plane release information](https://github.com/projectcalico/vpp-dataplane/blob/master/RELEASE_NOTES.md) ` - sv, err := semver.NewVersion(strings.TrimPrefix(ver, "v")) + sv, err := semver.NewVersion(strings.TrimPrefix(r.calicoVersion, "v")) if err != nil { return err } formatters := []string{ // Alternating placeholder / filler. We can't use backticks in the multiline string above, // so we replace anything that needs to be backticked into it here. - "{version}", ver, + "{version}", r.calicoVersion, "{branch}", fmt.Sprintf("release-v%d.%d", sv.Major, sv.Minor), "{release_stream}", fmt.Sprintf("v%d.%d", sv.Major, sv.Minor), - "{release_tar}", fmt.Sprintf("`release-%s.tgz`", ver), - "{calico_windows_zip}", fmt.Sprintf("`calico-windows-%s.zip`", ver), - "{helm_chart}", fmt.Sprintf("`tigera-operator-%s.tgz`", ver), + "{release_tar}", fmt.Sprintf("`release-%s.tgz`", r.calicoVersion), + "{calico_windows_zip}", fmt.Sprintf("`calico-windows-%s.zip`", r.calicoVersion), + "{helm_chart}", fmt.Sprintf("`tigera-operator-%s.tgz`", r.calicoVersion), } replacer := strings.NewReplacer(formatters...) 
releaseNote := replacer.Replace(releaseNoteTemplate) @@ -752,42 +998,25 @@ Additional links: args := []string{ "-username", r.githubOrg, "-repository", r.repo, - "-name", ver, + "-name", r.calicoVersion, "-body", releaseNote, "-draft", - ver, + r.calicoVersion, r.uploadDir(), } _, err = r.runner.RunInDir(r.repoRoot, "./bin/ghr", args, nil) return err } -func (r *CalicoManager) publishContainerImages(ver string) error { +func (r *CalicoManager) publishContainerImages() error { if !r.publishImages { logrus.Info("Skipping image publish") return nil } - releaseDirs := []string{ - "pod2daemon", - "key-cert-provisioner", - "cni-plugin", - "apiserver", - "kube-controllers", - "calicoctl", - "app-policy", - "typha", - "node", - } - - windowsReleaseDirs := []string{ - "node", - "cni-plugin", - } - env := append(os.Environ(), - fmt.Sprintf("IMAGETAG=%s", ver), - fmt.Sprintf("VERSION=%s", ver), + fmt.Sprintf("IMAGETAG=%s", r.calicoVersion), + fmt.Sprintf("VERSION=%s", r.calicoVersion), "RELEASE=true", "CONFIRM=true", fmt.Sprintf("DEV_REGISTRIES=%s", strings.Join(r.imageRegistries, " ")), @@ -796,7 +1025,7 @@ func (r *CalicoManager) publishContainerImages(ver string) error { // We allow for a certain number of retries when publishing each directory, since // network flakes can occasionally result in images failing to push. maxRetries := 1 - for _, dir := range releaseDirs { + for _, dir := range imageReleaseDirs { attempt := 0 for { out, err := r.makeInDirectoryWithOutput(filepath.Join(r.repoRoot, dir), "release-publish", env...) 
diff --git a/release/pkg/manager/calico/options.go b/release/pkg/manager/calico/options.go index c2641474e59..bb43961a09f 100644 --- a/release/pkg/manager/calico/options.go +++ b/release/pkg/manager/calico/options.go @@ -15,6 +15,8 @@ package calico import ( + "github.com/projectcalico/calico/release/internal/hashreleaseserver" + "github.com/projectcalico/calico/release/internal/imagescanner" "github.com/projectcalico/calico/release/internal/version" ) @@ -63,11 +65,30 @@ func WithOutputDir(outputDir string) Option { } } -func WithPublishOptions(images, tag, github bool) Option { +func WithPublishImages(publish bool) Option { return func(r *CalicoManager) error { - r.publishImages = images - r.publishTag = tag - r.publishGithub = github + r.publishImages = publish + return nil + } +} + +func WithPublishGitTag(publish bool) Option { + return func(r *CalicoManager) error { + r.publishTag = publish + return nil + } +} + +func WithPublishGithubRelease(publish bool) Option { + return func(r *CalicoManager) error { + r.publishGithub = publish + return nil + } +} + +func WithPublishHashrelease(publish bool) Option { + return func(r *CalicoManager) error { + r.publishHashrelease = publish return nil } } @@ -120,3 +141,26 @@ func WithReleaseBranchPrefix(prefix string) Option { return nil } } + +func WithTmpDir(tmpDir string) Option { + return func(r *CalicoManager) error { + r.tmpDir = tmpDir + return nil + } +} + +func WithHashrelease(hashrelease hashreleaseserver.Hashrelease, cfg hashreleaseserver.Config) Option { + return func(r *CalicoManager) error { + r.hashrelease = hashrelease + r.hashreleaseConfig = cfg + return nil + } +} + +func WithImageScanning(scanning bool, cfg imagescanner.Config) Option { + return func(r *CalicoManager) error { + r.imageScanning = scanning + r.imageScanningConfig = cfg + return nil + } +} diff --git a/release/pkg/manager/operator/manager.go b/release/pkg/manager/operator/manager.go index 025196a1abc..dc4fcdae900 100644 --- 
a/release/pkg/manager/operator/manager.go +++ b/release/pkg/manager/operator/manager.go @@ -43,6 +43,9 @@ type OperatorManager struct { // dir is the absolute path to the root directory of the operator repository dir string + // calicoDir is the absolute path to the root directory of the calico repository + calicoDir string + // origin remote repository remote string @@ -119,6 +122,7 @@ func (o *OperatorManager) Build(outputDir string) error { env := os.Environ() env = append(env, fmt.Sprintf("OS_VERSIONS=%s", componentsVersionPath)) env = append(env, fmt.Sprintf("COMMON_VERSIONS=%s", componentsVersionPath)) + env = append(env, fmt.Sprintf("CALICO_CRDS_DIR=%s", o.calicoDir)) if _, err := o.make("gen-versions", env); err != nil { return err } diff --git a/release/pkg/manager/operator/options.go b/release/pkg/manager/operator/options.go index b209e0f9d34..685a7bbc78c 100644 --- a/release/pkg/manager/operator/options.go +++ b/release/pkg/manager/operator/options.go @@ -23,6 +23,13 @@ func WithOperatorDirectory(root string) Option { } } +func WithCalicoDirectory(dir string) Option { + return func(o *OperatorManager) error { + o.calicoDir = dir + return nil + } +} + func WithRepoRemote(remote string) Option { return func(o *OperatorManager) error { o.remote = remote diff --git a/release/pkg/tasks/hashrelease.go b/release/pkg/tasks/hashrelease.go index d27b4d48611..f5b3f75f8a9 100644 --- a/release/pkg/tasks/hashrelease.go +++ b/release/pkg/tasks/hashrelease.go @@ -18,7 +18,6 @@ import ( "fmt" "os" "path/filepath" - "strings" "github.com/sirupsen/logrus" @@ -26,116 +25,10 @@ import ( "github.com/projectcalico/calico/release/internal/hashreleaseserver" "github.com/projectcalico/calico/release/internal/imagescanner" "github.com/projectcalico/calico/release/internal/pinnedversion" - "github.com/projectcalico/calico/release/internal/registry" "github.com/projectcalico/calico/release/internal/slack" "github.com/projectcalico/calico/release/internal/utils" - 
"github.com/projectcalico/calico/release/internal/version" ) -type imageExistsResult struct { - name string - image string - exists bool - err error -} - -func imgExists(name string, component registry.Component, ch chan imageExistsResult) { - r := imageExistsResult{ - name: name, - image: component.String(), - } - r.exists, r.err = registry.ImageExists(component.ImageRef()) - ch <- r -} - -// HashreleaseValidate validates the images in the hashrelease. -// These images are checked to ensure they exist in the registry -// as they should have been pushed in the standard build process. -func HashreleaseValidate(cfg *config.Config, skipISS bool) { - tmpDir := cfg.TmpFolderPath() - name, err := pinnedversion.RetrieveReleaseName(tmpDir) - if err != nil { - logrus.WithError(err).Fatal("Failed to get release name") - } - productBranch, err := utils.GitBranch(cfg.RepoRootDir) - if err != nil { - logrus.WithError(err).Fatalf("Failed to get %s branch name", utils.ProductName) - } - productVersion, err := pinnedversion.RetrievePinnedProductVersion(tmpDir) - if err != nil { - logrus.WithError(err).Fatal("Failed to get candidate name") - } - operatorVersion, err := pinnedversion.RetrievePinnedOperatorVersion(tmpDir) - if err != nil { - logrus.WithError(err).Fatal("Failed to get operator version") - } - images, err := pinnedversion.RetrieveComponentsToValidate(tmpDir) - if err != nil { - logrus.WithError(err).Fatal("Failed to get pinned version") - } - results := make(map[string]imageExistsResult, len(images)) - - ch := make(chan imageExistsResult) - for name, component := range images { - go imgExists(name, component, ch) - } - for range images { - res := <-ch - results[res.name] = res - } - failedImages := []registry.Component{} - failedImageNames := []string{} - for name, r := range results { - logrus.WithFields(logrus.Fields{ - "image": r.image, - "exists": r.exists, - }).Info("Validating image") - if r.err != nil || !r.exists { - logrus.WithError(r.err).WithField("image", 
name).Error("Error checking image") - failedImageNames = append(failedImageNames, name) - failedImages = append(failedImages, images[name]) - } else { - logrus.WithField("image", name).Info("Image exists") - } - } - failedCount := len(failedImageNames) - if failedCount > 0 { - // We only care to send failure messages if we are in CI - if cfg.CI.IsCI { - slackMsg := slack.Message{ - Config: cfg.SlackConfig, - Data: slack.MessageData{ - ReleaseName: name, - Product: utils.DisplayProductName(), - Stream: version.DeterminePublishStream(productBranch, productVersion), - Version: productVersion, - OperatorVersion: operatorVersion, - CIURL: cfg.CI.URL(), - FailedImages: failedImages, - }, - } - if err := slackMsg.SendFailure(logrus.IsLevelEnabled(logrus.DebugLevel)); err != nil { - logrus.WithError(err).Error("Failed to send slack message") - } - } - logrus.WithField("images", strings.Join(failedImageNames, ", ")). - Fatalf("Failed to validate %d images, see above for details", failedCount) - } - if !skipISS { - logrus.Info("Sending images to ISS") - imageList := []string{} - for _, component := range images { - imageList = append(imageList, component.String()) - } - imageScanner := imagescanner.New(cfg.ImageScannerConfig) - err := imageScanner.Scan(imageList, version.DeterminePublishStream(productBranch, productVersion), false, cfg.OutputDir) - if err != nil { - // Error is logged and ignored as this is not considered a fatal error - logrus.WithError(err).Error("Failed to scan images") - } - } -} - // HashreleasePublished checks if the hashrelease has already been published. // If it has, the process is halted. 
func HashreleasePublished(cfg *config.Config, hash string) (bool, error) { @@ -152,54 +45,20 @@ func HashreleasePublished(cfg *config.Config, hash string) (bool, error) { return hashreleaseserver.HasHashrelease(hash, &cfg.HashreleaseServerConfig), nil } -// HashreleaseValidate publishes the hashrelease -func HashreleasePush(cfg *config.Config, path string, setLatest bool) { - tmpDir := cfg.TmpFolderPath() - name, err := pinnedversion.RetrieveReleaseName(tmpDir) - if err != nil { - logrus.WithError(err).Fatal("Failed to get release name") - } - note, err := pinnedversion.RetrievePinnedVersionNote(tmpDir) - if err != nil { - logrus.WithError(err).Fatal("Failed to get pinned version note") - } - productBranch, err := utils.GitBranch(cfg.RepoRootDir) - if err != nil { - logrus.WithError(err).Fatalf("Failed to get %s branch name", utils.ProductName) - } - productVersion, err := pinnedversion.RetrievePinnedProductVersion(tmpDir) - if err != nil { - logrus.WithError(err).Fatal("Failed to get candidate name") - } - operatorVersion, err := pinnedversion.RetrievePinnedOperatorVersion(tmpDir) - if err != nil { - logrus.WithError(err).Fatal("Failed to get operator version") - } - releaseHash, err := pinnedversion.RetrievePinnedVersionHash(tmpDir) - if err != nil { - logrus.WithError(err).Fatal("Failed to get release hash") - } - hashrel := hashreleaseserver.Hashrelease{ - Name: name, - Hash: releaseHash, - Note: note, - Stream: version.DeterminePublishStream(productBranch, productVersion), - Source: path, - Latest: setLatest, +// HashreleaseSlackMessage sends a slack message to notify that a hashrelease has been published. 
+func HashreleaseSlackMessage(cfg *config.Config, hashrel *hashreleaseserver.Hashrelease) error { + scanResultURL := imagescanner.RetrieveResultURL(cfg.TmpFolderPath()) + if scanResultURL == "" { + logrus.Warn("No image scan result URL found") } - logrus.WithField("note", note).Info("Publishing hashrelease") - if err := hashreleaseserver.PublishHashrelease(hashrel, &cfg.HashreleaseServerConfig); err != nil { - logrus.WithError(err).Fatal("Failed to publish hashrelease") - } - scanResultURL := imagescanner.RetrieveResultURL(cfg.OutputDir) slackMsg := slack.Message{ Config: cfg.SlackConfig, Data: slack.MessageData{ - ReleaseName: name, + ReleaseName: hashrel.Name, Product: utils.DisplayProductName(), - Stream: version.DeterminePublishStream(productBranch, productVersion), - Version: productVersion, - OperatorVersion: operatorVersion, + Stream: hashrel.Stream, + Version: hashrel.ProductVersion, + OperatorVersion: hashrel.OperatorVersion, DocsURL: hashrel.URL(), CIURL: cfg.CI.URL(), ImageScanResultURL: scanResultURL, @@ -209,17 +68,10 @@ func HashreleasePush(cfg *config.Config, path string, setLatest bool) { logrus.WithError(err).Error("Failed to send slack message") } logrus.WithFields(logrus.Fields{ - "name": name, + "name": hashrel.Name, "URL": hashrel.URL(), - }).Info("Published hashrelease") -} - -// HashreleaseCleanRemote cleans up old hashreleases on the docs host -func HashreleaseCleanRemote(cfg *config.Config) { - logrus.Info("Cleaning up old hashreleases") - if err := hashreleaseserver.CleanOldHashreleases(&cfg.HashreleaseServerConfig); err != nil { - logrus.WithError(err).Fatal("Failed to delete old hashreleases") - } + }).Info("Sent hashrelease publish notification to slack") + return nil } // ReformatHashrelease modifies the generated release output to match diff --git a/typha/Makefile b/typha/Makefile index 16a306efd0d..2e34d1e0004 100644 --- a/typha/Makefile +++ b/typha/Makefile @@ -189,14 +189,6 @@ release-build: .release-$(VERSION).created $(MAKE) 
FIPS=true retag-build-images-with-registries RELEASE=true IMAGETAG=latest-fips LATEST_IMAGE_TAG=latest-fips touch $@ -## Verifies the release artifacts produces by `make release-build` are correct. -release-verify: release-prereqs - # Check the reported version is correct for each release artifact. - docker run --rm $(TYPHA_IMAGE):$(VERSION)-$(ARCH) calico-typha --version | grep $(VERSION) || ( echo "Reported version:" `docker run --rm $(TYPHA_IMAGE):$(VERSION)-$(ARCH) calico-typha --version` "\nExpected version: $(VERSION)" && exit 1 ) - docker run --rm quay.io/$(TYPHA_IMAGE):$(VERSION)-$(ARCH) calico-typha --version | grep $(VERSION) || ( echo "Reported version:" `docker run --rm quay.io/$(TYPHA_IMAGE):$(VERSION)-$(ARCH) calico-typha --version | grep -x $(VERSION)` "\nExpected version: $(VERSION)" && exit 1 ) - - # TODO: Some sort of quick validation of the produced binaries. - ## Pushes a github release and release artifacts produced by `make release-build`. release-publish: release-prereqs .release-$(VERSION).published .release-$(VERSION).published: From d25a5939cd148c14831f58779e2649e7745647ba Mon Sep 17 00:00:00 2001 From: Jiawei Huang Date: Mon, 25 Nov 2024 21:37:22 -0800 Subject: [PATCH 09/11] Use gcp n4 machine and the latest ubuntu image --- .semaphore/vms/create-test-vm | 6 +++--- felix/.semaphore/create-test-vm | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.semaphore/vms/create-test-vm b/.semaphore/vms/create-test-vm index cbe0d9e8e4d..0995e64564b 100755 --- a/.semaphore/vms/create-test-vm +++ b/.semaphore/vms/create-test-vm @@ -28,11 +28,11 @@ gcloud auth activate-service-account --key-file=$gcp_secret_key function create-vm() { gcloud --quiet compute instances create "${vm_name}" \ --zone=${zone} \ - --machine-type=n1-standard-4 \ - --image=ubuntu-2004-focal-v20211102 \ + --machine-type=n4-standard-4 \ + --image=ubuntu-2004-focal-v20241115 \ --image-project=ubuntu-os-cloud \ --boot-disk-size=$disk_size \ - 
--boot-disk-type=pd-standard && \ + --boot-disk-type=hyperdisk-balanced && \ ssh_cmd="gcloud --quiet compute ssh --zone=${zone} ubuntu@${vm_name}" for ssh_try in $(seq 1 10); do echo "Trying to SSH in: $ssh_try" diff --git a/felix/.semaphore/create-test-vm b/felix/.semaphore/create-test-vm index 830cdb8acdb..29571b55215 100755 --- a/felix/.semaphore/create-test-vm +++ b/felix/.semaphore/create-test-vm @@ -27,11 +27,11 @@ gcloud auth activate-service-account --key-file=$HOME/secrets/secret.google-serv function create-vm() { gcloud --quiet compute instances create "${vm_name}" \ --zone=${zone} \ - --machine-type=n1-standard-4 \ - --image=ubuntu-2204-jammy-v20240228 \ + --machine-type=n4-standard-4 \ + --image=ubuntu-2204-jammy-v20241119 \ --image-project=ubuntu-os-cloud \ --boot-disk-size=20GB \ - --boot-disk-type=pd-standard && \ + --boot-disk-type=hyperdisk-balanced && \ for ssh_try in $(seq 1 10); do echo "Trying to SSH in: $ssh_try" gcloud --quiet compute ssh --zone=${zone} "ubuntu@${vm_name}" -- echo "Success" && break From 89608985333ab8faa19da224fc5243f119bfd2fb Mon Sep 17 00:00:00 2001 From: Shaun Crampton Date: Tue, 26 Nov 2024 10:27:32 -0700 Subject: [PATCH 10/11] Free dedupe buffer's tracking map after shrink (#9526) Go doesn't free map blocks, even after a map shrinks considerably. The dedupe buffer tends to store a lot of keys for a start-of-day snapshot, make sure we clean up the leaked map capacity once we're back down to zero. 
Upstream issue: https://github.com/golang/go/issues/20135 --- .../backend/syncersv1/dedupebuffer/dedupe_buffer.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/libcalico-go/lib/backend/syncersv1/dedupebuffer/dedupe_buffer.go b/libcalico-go/lib/backend/syncersv1/dedupebuffer/dedupe_buffer.go index 2da6c02a621..38b6a9cd408 100644 --- a/libcalico-go/lib/backend/syncersv1/dedupebuffer/dedupe_buffer.go +++ b/libcalico-go/lib/backend/syncersv1/dedupebuffer/dedupe_buffer.go @@ -45,7 +45,9 @@ type DedupeBuffer struct { // keyToPendingUpdate holds an entry for each updateWithStringKey in the // pendingUpdates queue - keyToPendingUpdate map[string]*list.Element + keyToPendingUpdate map[string]*list.Element + peakPendingUpdatesLen int + // liveResourceKeys Contains an entry for every key that we have sent to // the consumer and that we have not subsequently sent a deletion for. liveResourceKeys set.Set[string] @@ -176,6 +178,7 @@ func (d *DedupeBuffer) OnUpdatesKeysKnown(updates []api.Update, keys []string) { update: u, }) d.keyToPendingUpdate[key] = element + d.peakPendingUpdatesLen = max(len(d.keyToPendingUpdate), d.peakPendingUpdatesLen) } } queueNowEmpty := d.pendingUpdates.Len() == 0 @@ -252,6 +255,14 @@ func (d *DedupeBuffer) pullNextBatch(buf []any, batchSize int) []any { if u, ok := first.Value.(updateWithStringKey); ok { key := u.key delete(d.keyToPendingUpdate, key) + if len(d.keyToPendingUpdate) == 0 && d.peakPendingUpdatesLen > 100 { + // Map blocks never get freed when a map is scaled down. + // https://github.com/golang/go/issues/20135 + // Opportunistically free the map when it's empty. This can + // free a good amount of RAM after loading a large snapshot. + d.keyToPendingUpdate = map[string]*list.Element{} + d.peakPendingUpdatesLen = 0 + } // Update liveResourceKeys now, before we drop the lock. Once we drop // the lock we're committed to sending these keys. 
if u.update.Value == nil { From ab3dd9437fcf498681b600c5e7c41f4799873a73 Mon Sep 17 00:00:00 2001 From: tuti Date: Tue, 26 Nov 2024 13:19:28 -0800 Subject: [PATCH 11/11] fix env for nightly builds --- .semaphore/release/hashrelease.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.semaphore/release/hashrelease.yml b/.semaphore/release/hashrelease.yml index 3bc6e5297ad..161f77bc117 100644 --- a/.semaphore/release/hashrelease.yml +++ b/.semaphore/release/hashrelease.yml @@ -40,7 +40,7 @@ blocks: jobs: - name: Build and publish hashrelease commands: - - if [[ ${SEMAPHORE_PIPELINE_PROMOTION} == "true" ]]; then export BUILD_IMAGES=true; export SKIP_PUBLISH_IMAGES=false; fi + - if [[ ${SEMAPHORE_WORKFLOW_TRIGGERED_BY_SCHEDULE} == "true" ]]; then export BUILD_IMAGES=true; export SKIP_PUBLISH_IMAGES=false; fi - make hashrelease prologue: commands: