From 975fcb331159c0c0de650ae8270bbcfbcc20828b Mon Sep 17 00:00:00 2001
From: Wei Liu
Date: Thu, 5 Sep 2024 11:38:02 +0800
Subject: [PATCH] use the same code for the cloudevents integration test

---
 .github/workflows/cloudevents-integration.yml | 2 +-
 go.mod | 9 +-
 go.sum | 6 +-
 test/integration-test.mk | 16 +-
 .../cloudevents/deleteoption_test.go | 443 -----
 .../manifestworkreplicaset_test.go | 211 -----
 test/integration/cloudevents/source/codec.go | 221 ------
 .../integration/cloudevents/source/handler.go | 76 --
 test/integration/cloudevents/source/lister.go | 17 -
 .../cloudevents/source/manifestwork.go | 194 -----
 test/integration/cloudevents/source/source.go | 136 ----
 .../integration/cloudevents/source/watcher.go | 64 --
 .../cloudevents/source/workclientset.go | 46 --
 .../cloudevents/statusfeedback_test.go | 726 ------------------
 test/integration/cloudevents/suite_test.go | 164 ----
 .../cloudevents/updatestrategy_test.go | 444 -----------
 test/integration/cloudevents/work_test.go | 241 ------
 test/integration/util/assertion.go | 7 +-
 test/integration/util/authentication.go | 3 +-
 test/integration/util/mqtt.go | 91 +++
 test/integration/util/work.go | 42 +
 test/integration/work/deleteoption_test.go | 62 +-
 .../work/manifestworkreplicaset_test.go | 4 +-
 test/integration/work/statusfeedback_test.go | 14 +-
 test/integration/work/suite_test.go | 96 ++-
 test/integration/work/updatestrategy_test.go | 116 ++-
 test/integration/work/work_test.go | 125 ++-
 .../go/compute/metadata/CHANGES.md | 26 +
 .../go/compute/metadata/LICENSE | 202 +++++
 .../go/compute/metadata/README.md | 27 +
 .../go/compute/metadata/metadata.go | 579 ++++++++++++++
 .../go/compute/metadata/retry.go | 114 +++
 .../go/compute/metadata/retry_linux.go | 26 +
 .../x/oauth2/authhandler/authhandler.go | 94 +++
 .../golang.org/x/oauth2/google/appengine.go | 40 +
 vendor/golang.org/x/oauth2/google/default.go | 317 ++++++++
 vendor/golang.org/x/oauth2/google/doc.go | 53 ++
 vendor/golang.org/x/oauth2/google/error.go | 64 ++
 .../x/oauth2/google/externalaccount/aws.go | 577 ++++++++++++++
 .../google/externalaccount/basecredentials.go | 485 ++++++++++++
 .../externalaccount/executablecredsource.go | 313 ++++++++
 .../google/externalaccount/filecredsource.go | 61 ++
 .../x/oauth2/google/externalaccount/header.go | 64 ++
 .../programmaticrefreshcredsource.go | 21 +
 .../google/externalaccount/urlcredsource.go | 79 ++
 vendor/golang.org/x/oauth2/google/google.go | 309 ++++++++
 .../externalaccountauthorizeduser.go | 114 +++
 .../internal/impersonate/impersonate.go | 105 +++
 .../google/internal/stsexchange/clientauth.go | 45 ++
 .../internal/stsexchange/sts_exchange.go | 125 +++
 vendor/golang.org/x/oauth2/google/jwt.go | 102 +++
 vendor/golang.org/x/oauth2/google/sdk.go | 201 +++++
 vendor/golang.org/x/oauth2/jws/jws.go | 182 +++++
 vendor/golang.org/x/oauth2/jwt/jwt.go | 185 +++++
 .../grpc/credentials/oauth/oauth.go | 244 ++++++
 vendor/modules.txt | 15 +-
 .../sdk-go/pkg/apis/cluster/v1alpha1/score.go | 43 ++
 .../pkg/cloudevents/generic/agentclient.go | 47 +-
 .../cloudevents/generic/metrics_collector.go | 138 ++++
 .../generic/options/grpc/agentoptions.go | 3 +-
 .../generic/options/grpc/options.go | 54 +-
 .../options/grpc/protobuf/v1/cloudevent.pb.go | 51 +-
 .../options/grpc/protobuf/v1/cloudevent.proto | 4 +-
 .../generic/options/grpc/protocol/option.go | 3 +-
 .../generic/options/grpc/protocol/protocol.go | 14 +-
 .../pkg/cloudevents/generic/sourceclient.go | 49 +-
 .../pkg/cloudevents/generic/types/types.go | 3 +
 .../work/agent/client/manifestwork.go | 79 +-
 .../cloudevents/work/agent/lister/lister.go | 17 +-
 .../pkg/cloudevents/work/errors/errors.go | 36 +
 .../work/source/client/manifestwork.go | 95 ++-
 .../cloudevents/work/source/lister/lister.go | 19 +-
 .../sdk-go/pkg/cloudevents/work/store/base.go | 27 +-
 .../pkg/cloudevents/work/store/informer.go | 21 +-
 .../pkg/cloudevents/work/store/interface.go | 4 +-
 .../pkg/cloudevents/work/store/local.go | 4 +-
 .../pkg/cloudevents/work/utils/utils.go | 27 +-
 77 files changed, 5820 insertions(+), 3263 deletions(-)
 delete mode 100644 test/integration/cloudevents/deleteoption_test.go
 delete mode 100644 test/integration/cloudevents/manifestworkreplicaset_test.go
 delete mode 100644 test/integration/cloudevents/source/codec.go
 delete mode 100644 test/integration/cloudevents/source/handler.go
 delete mode 100644 test/integration/cloudevents/source/lister.go
 delete mode 100644 test/integration/cloudevents/source/manifestwork.go
 delete mode 100644 test/integration/cloudevents/source/source.go
 delete mode 100644 test/integration/cloudevents/source/watcher.go
 delete mode 100644 test/integration/cloudevents/source/workclientset.go
 delete mode 100644 test/integration/cloudevents/statusfeedback_test.go
 delete mode 100644 test/integration/cloudevents/suite_test.go
 delete mode 100644 test/integration/cloudevents/updatestrategy_test.go
 delete mode 100644 test/integration/cloudevents/work_test.go
 create mode 100644 test/integration/util/mqtt.go
 create mode 100644 test/integration/util/work.go
 create mode 100644 vendor/cloud.google.com/go/compute/metadata/CHANGES.md
 create mode 100644 vendor/cloud.google.com/go/compute/metadata/LICENSE
 create mode 100644 vendor/cloud.google.com/go/compute/metadata/README.md
 create mode 100644 vendor/cloud.google.com/go/compute/metadata/metadata.go
 create mode 100644 vendor/cloud.google.com/go/compute/metadata/retry.go
 create mode 100644 vendor/cloud.google.com/go/compute/metadata/retry_linux.go
 create mode 100644 vendor/golang.org/x/oauth2/authhandler/authhandler.go
 create mode 100644 vendor/golang.org/x/oauth2/google/appengine.go
 create mode 100644 vendor/golang.org/x/oauth2/google/default.go
 create mode 100644 vendor/golang.org/x/oauth2/google/doc.go
 create mode 100644 vendor/golang.org/x/oauth2/google/error.go
 create mode 100644 vendor/golang.org/x/oauth2/google/externalaccount/aws.go
 create mode 100644 vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go
 create mode 100644 vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go
 create mode 100644 vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go
 create mode 100644 vendor/golang.org/x/oauth2/google/externalaccount/header.go
 create mode 100644 vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go
 create mode 100644 vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go
 create mode 100644 vendor/golang.org/x/oauth2/google/google.go
 create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go
 create mode 100644 vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go
 create mode 100644 vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go
 create mode 100644 vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go
 create mode 100644 vendor/golang.org/x/oauth2/google/jwt.go
 create mode 100644 vendor/golang.org/x/oauth2/google/sdk.go
 create mode 100644 vendor/golang.org/x/oauth2/jws/jws.go
 create mode 100644 vendor/golang.org/x/oauth2/jwt/jwt.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/oauth/oauth.go
 create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1/score.go
 create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/metrics_collector.go
 create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/errors/errors.go

diff --git a/.github/workflows/cloudevents-integration.yml b/.github/workflows/cloudevents-integration.yml
index 7afa5c4e6..31aedf274 100644
--- a/.github/workflows/cloudevents-integration.yml
+++ b/.github/workflows/cloudevents-integration.yml
@@ -5,7 +5,7 @@ on:
   pull_request:
     paths:
       - 'pkg/work/spoke/*.go'
-      - 'test/integration/cloudevents/**'
+      - 'test/integration/work/**'
     branches:
       - main
       - release-*
diff --git a/go.mod b/go.mod
index a8b7b5026..e63ca50cd 100644
--- a/go.mod
+++ b/go.mod
@@ -3,12 +3,10 @@ module open-cluster-management.io/ocm
 go 1.22.5
 
 require (
-	github.com/cloudevents/sdk-go/v2 v2.15.3-0.20240329120647-e6a74efbacbf
 	github.com/davecgh/go-spew v1.1.1
 	github.com/evanphx/json-patch v5.9.0+incompatible
 	github.com/ghodss/yaml v1.0.0
 	github.com/google/go-cmp v0.6.0
-	github.com/google/uuid v1.6.0
 	github.com/mochi-mqtt/server/v2 v2.6.5
 	github.com/onsi/ginkgo/v2 v2.20.0
 	github.com/onsi/gomega v1.34.1
@@ -21,7 +19,6 @@ require (
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.9.0
 	github.com/valyala/fasttemplate v1.2.2
-	go.uber.org/zap v1.27.0
 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
 	golang.org/x/net v0.28.0
 	gopkg.in/yaml.v2 v2.4.0
@@ -37,13 +34,14 @@ require (
 	k8s.io/utils v0.0.0-20240310230437-4693a0247e57
 	open-cluster-management.io/addon-framework v0.10.1-0.20240703130731-ba7fd000a03a
 	open-cluster-management.io/api v0.14.1-0.20240627145512-bd6f2229b53c
-	open-cluster-management.io/sdk-go v0.14.1-0.20240628095929-9ffb1b19e566
+	open-cluster-management.io/sdk-go v0.14.1-0.20240906071839-3e8465851efc
 	sigs.k8s.io/controller-runtime v0.18.5
 	sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96
 	sigs.k8s.io/yaml v1.4.0
 )
 
 require (
+	cloud.google.com/go/compute/metadata v0.3.0 // indirect
 	github.com/BurntSushi/toml v1.3.2 // indirect
 	github.com/Masterminds/goutils v1.1.1 // indirect
 	github.com/Masterminds/semver/v3 v3.2.1 // indirect
@@ -58,6 +56,7 @@ require (
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/cloudevents/sdk-go/protocol/kafka_confluent/v2 v2.0.0-20240413090539-7fef29478991 // indirect
 	github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 // indirect
+	github.com/cloudevents/sdk-go/v2 v2.15.3-0.20240329120647-e6a74efbacbf // indirect
 	github.com/confluentinc/confluent-kafka-go/v2 v2.3.0 // indirect
 	github.com/coreos/go-semver v0.3.1 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@@ -83,6 +82,7 @@ require (
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect
+	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/websocket v1.5.1 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
@@ -128,6 +128,7 @@ require (
 	go.opentelemetry.io/otel/trace v1.28.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
+	go.uber.org/zap v1.27.0 // indirect
 	golang.org/x/crypto v0.26.0 // indirect
 	golang.org/x/oauth2 v0.20.0 // indirect
 	golang.org/x/sync v0.8.0 // indirect
diff --git a/go.sum b/go.sum
index be93000b5..1cda04d04 100644
--- a/go.sum
+++ b/go.sum
@@ -1,3 +1,5 @@
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
@@ -445,8 +447,8 @@ open-cluster-management.io/addon-framework v0.10.1-0.20240703130731-ba7fd000a03a
 open-cluster-management.io/addon-framework v0.10.1-0.20240703130731-ba7fd000a03a/go.mod h1:C1VETu/CIQKYfMiVAgNzPEUHjCpL9P1Z/KsGhHa4kl4=
 open-cluster-management.io/api v0.14.1-0.20240627145512-bd6f2229b53c h1:gYfgkX/U6fv2d3Ly8D6N1GM9zokORupLSgCxx791zZw=
 open-cluster-management.io/api v0.14.1-0.20240627145512-bd6f2229b53c/go.mod h1:9erZEWEn4bEqh0nIX2wA7f/s3KCuFycQdBrPrRzi0QM=
-open-cluster-management.io/sdk-go v0.14.1-0.20240628095929-9ffb1b19e566 h1:8dgPiM3byX/rtOrFJIsea2haV4hSFTND65Tlj1EdK18=
-open-cluster-management.io/sdk-go v0.14.1-0.20240628095929-9ffb1b19e566/go.mod h1:xFmN3Db5nN68oLGnstmIRv4us8HJCdXFnBNMXVp0jWY=
+open-cluster-management.io/sdk-go v0.14.1-0.20240906071839-3e8465851efc h1:m669Deo9FCIbekScJ1xt8jIglaJG3fH17BFrUhpK5ww=
+open-cluster-management.io/sdk-go v0.14.1-0.20240906071839-3e8465851efc/go.mod h1:dSRrKrD3zV36l6Pex3pv/ey3xw6NcvyurC4TxbRhM8w=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4=
 sigs.k8s.io/controller-runtime v0.18.5 h1:nTHio/W+Q4aBlQMgbnC5hZb4IjIidyrizMai9P6n4Rk=
diff --git a/test/integration-test.mk b/test/integration-test.mk
index 6d80c7442..ef57665d5 100644
--- a/test/integration-test.mk
+++ b/test/integration-test.mk
@@ -26,13 +26,15 @@ clean-integration-test:
 
 clean: clean-integration-test
 
+build-work-integration:
+	go test -c ./test/integration/work -o ./work-integration.test
+
 test-registration-integration: ensure-kubebuilder-tools
 	go test -c ./test/integration/registration -o ./registration-integration.test
 	./registration-integration.test -ginkgo.slow-spec-threshold=15s -ginkgo.v -ginkgo.fail-fast
 .PHONY: test-registration-integration
 
-test-work-integration: ensure-kubebuilder-tools
-	go test -c ./test/integration/work -o ./work-integration.test
+test-work-integration: ensure-kubebuilder-tools build-work-integration
 	./work-integration.test -ginkgo.slow-spec-threshold=15s -ginkgo.v -ginkgo.fail-fast
 .PHONY: test-work-integration
 
@@ -51,9 +53,13 @@ test-addon-integration: ensure-kubebuilder-tools
 	./addon-integration.test -ginkgo.slow-spec-threshold=15s -ginkgo.v -ginkgo.fail-fast
 .PHONY: test-addon-integration
 
-test-cloudevents-integration: ensure-kubebuilder-tools
-	go test -c ./test/integration/cloudevents -o ./cloudevents-integration.test
-	./cloudevents-integration.test -ginkgo.slow-spec-threshold=15s -ginkgo.v -ginkgo.fail-fast
+test-cloudevents-integration: ensure-kubebuilder-tools build-work-integration
+	./work-integration.test -ginkgo.slow-spec-threshold=15s -ginkgo.v -ginkgo.fail-fast \
+		-ginkgo.skip-file manifestworkreplicaset_test.go \
+		-ginkgo.skip-file executor_test.go \
+		-ginkgo.skip-file unmanaged_appliedwork_test.go \
+		-test.driver=mqtt \
+		-v=4
 .PHONY: test-cloudevents-integration
 
 test-integration: test-registration-operator-integration test-registration-integration test-placement-integration test-work-integration test-addon-integration
diff --git a/test/integration/cloudevents/deleteoption_test.go b/test/integration/cloudevents/deleteoption_test.go
deleted file mode 100644
index 1f7514ce1..000000000
--- a/test/integration/cloudevents/deleteoption_test.go
+++ /dev/null
@@ -1,443 +0,0 @@
-package cloudevents
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	"github.com/onsi/ginkgo/v2"
-	"github.com/onsi/gomega"
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	utilrand "k8s.io/apimachinery/pkg/util/rand"
-
-	workapiv1 "open-cluster-management.io/api/work/v1"
-
-	commonoptions "open-cluster-management.io/ocm/pkg/common/options"
-	"open-cluster-management.io/ocm/pkg/work/spoke"
-	"open-cluster-management.io/ocm/test/integration/util"
-)
-
-var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
-	var err error
-	var cancel context.CancelFunc
-
-	var clusterName string
-
-	var work *workapiv1.ManifestWork
-	var manifests []workapiv1.Manifest
-	var anotherWork *workapiv1.ManifestWork
-	var appliedManifestWorkName string
-	var anotherAppliedManifestWorkName string
-
-	ginkgo.BeforeEach(func() {
-		clusterName = utilrand.String(5)
-
-		ns := &corev1.Namespace{}
-		ns.Name = clusterName
-		_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
-		gomega.Expect(err).ToNot(gomega.HaveOccurred())
-
-		var ctx context.Context
-		ctx, cancel = context.WithCancel(context.Background())
-
-		o := spoke.NewWorkloadAgentOptions()
-		o.StatusSyncInterval = 3 * time.Second
-		o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second
-		o.WorkloadSourceDriver = workSourceDriver
-		o.WorkloadSourceConfig = workSourceConfigFileName
-		o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName)
-		o.CloudEventsClientCodecs = []string{"manifest", "manifestbundle"}
-
-		commOptions := commonoptions.NewAgentOptions()
-		commOptions.SpokeClusterName = clusterName
-
-		go runWorkAgent(ctx, o, commOptions)
-
-		// reset manifests
-		manifests = nil
-	})
-
-	ginkgo.AfterEach(func() {
-		if cancel != nil {
-			cancel()
-		}
-		err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{})
-		gomega.Expect(err).ToNot(gomega.HaveOccurred())
-	})
-
-	ginkgo.Context("Delete options", func() {
-		ginkgo.BeforeEach(func() {
-			manifests = []workapiv1.Manifest{
-				util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})),
-			}
-			work = util.NewManifestWork(clusterName, "", manifests)
-		})
-
-		ginkgo.It("Orphan deletion of the whole manifestwork", func() {
-			work.Spec.DeleteOption = &workapiv1.DeleteOption{
-				PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan,
-			}
-
-			work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred())
-
-			util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue,
-				[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
-			util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue,
-
[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - // Ensure configmap exists - util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - - // Ensure ownership of configmap is updated - gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{}) - if err != nil { - return err - } - - if len(cm.OwnerReferences) != 0 { - return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Delete the work - err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - // Wait for deletion of manifest work - gomega.Eventually(func() bool { - _, err := workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - return errors.IsNotFound(err) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) - - // Ensure configmap exists - util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - }) - - ginkgo.It("Clean the resource when orphan deletion option is removed", func() { - work.Spec.DeleteOption = &workapiv1.DeleteOption{ - PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan, - SelectivelyOrphan: &workapiv1.SelectivelyOrphan{ - OrphaningRules: []workapiv1.OrphaningRule{ - { - Group: "", - Resource: "configmaps", - Namespace: clusterName, - Name: cm1, - }, - }, - }, - } - - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - // Ensure configmap exists - util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - - // Ensure ownership of configmap is updated - gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{}) - if err != nil { - return err - } - - if len(cm.OwnerReferences) != 0 { - return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Remove the delete option - gomega.Eventually(func() error { - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - work.Spec.DeleteOption = nil - _, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Ensure ownership of configmap is updated - gomega.Eventually(func() error { - cm, err := 
spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{}) - if err != nil { - return err - } - - if len(cm.OwnerReferences) != 1 { - return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Delete the work - err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - // Wait for deletion of manifest work - gomega.Eventually(func() bool { - _, err := workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - return errors.IsNotFound(err) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) - - // All of the resource should be deleted. - _, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{}) - gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) - }) - }) - - ginkgo.Context("Resource sharing and adoption between manifestworks", func() { - ginkgo.BeforeEach(func() { - manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})), - util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, []string{})), - } - work = util.NewManifestWork(clusterName, "", manifests) - // Create another manifestworks with one shared resource. - anotherWork = util.NewManifestWork(clusterName, "sharing-resource-work", []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})), - util.ToManifest(util.NewConfigmap(clusterName, "cm3", map[string]string{"e": "f"}, []string{})), - }) - }) - - ginkgo.JustBeforeEach(func() { - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - appliedManifestWorkName = fmt.Sprintf("%s-%s", workSourceHash, work.UID) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - anotherWork, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), anotherWork, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - anotherAppliedManifestWorkName = fmt.Sprintf("%s-%s", workSourceHash, anotherWork.UID) - - util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - }) - - ginkgo.It("shared resource between the manifestwork should be kept when one manifestwork is deleted", func() { - // 
ensure configmap exists and get its uid - util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - currentUID := curentConfigMap.UID - - // Ensure that uid recorded in the appliedmanifestwork and anotherappliedmanifestwork is correct. - gomega.Eventually(func() error { - appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return err - } - - for _, appliedResource := range appliedManifestWork.Status.AppliedResources { - if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) { - return nil - } - } - - return fmt.Errorf("resource name or uid in appliedmanifestwork does not match") - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - gomega.Eventually(func() error { - anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get( - context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return err - } - - for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { - if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) { - return nil - } - } - - return fmt.Errorf("resource name or uid in appliedmanifestwork does not match") - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Delete one manifestwork - err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - // Ensure the appliedmanifestwork of deleted manifestwork is removed so it won't try to delete shared resource - gomega.Eventually(func() error { - appliedWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return nil - } - if err != nil { - return err - } - return fmt.Errorf("appliedmanifestwork should not exist: %v", appliedWork.DeletionTimestamp) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) - - // Ensure the configmap is kept and tracked by anotherappliedmanifestwork. 
- gomega.Eventually(func() error { - configMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{}) - if err != nil { - return err - } - - if currentUID != configMap.UID { - return fmt.Errorf("UID should be equal") - } - - anotherappliedmanifestwork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get( - context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return err - } - - hasAppliedResourceName := false - hasAppliedResourceUID := false - for _, appliedResource := range anotherappliedmanifestwork.Status.AppliedResources { - if appliedResource.Name == cm1 { - hasAppliedResourceName = true - } - - if appliedResource.UID != string(currentUID) { - hasAppliedResourceUID = true - } - } - - if !hasAppliedResourceName { - return fmt.Errorf("resource Name should be cm1") - } - - if !hasAppliedResourceUID { - return fmt.Errorf("UID should be equal") - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - }) - - ginkgo.It("shared resource between the manifestwork should be kept when the shared resource is removed from one manifestwork", func() { - // ensure configmap exists and get its uid - util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - currentUID := curentConfigMap.UID - - // Ensure that uid recorded in the appliedmanifestwork and anotherappliedmanifestwork is correct. - gomega.Eventually(func() error { - appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return err - } - - for _, appliedResource := range appliedManifestWork.Status.AppliedResources { - if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) { - return nil - } - } - - return fmt.Errorf("resource name or uid in appliedmanifestwork does not match") - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - gomega.Eventually(func() error { - anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get( - context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return err - } - - for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { - if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) { - return nil - } - } - - return fmt.Errorf("resource name or uid in appliedmanifestwork does not match") - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Update one manifestwork to remove the shared resource - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - work.Spec.Workload.Manifests = []workapiv1.Manifest{ - manifests[1], - util.ToManifest(util.NewConfigmap(clusterName, "cm4", map[string]string{"g": "h"}, []string{})), - } - _, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - // Ensure the resource is not tracked by the appliedmanifestwork. 
- gomega.Eventually(func() error { - appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return err - } - - for _, appliedResource := range appliedManifestWork.Status.AppliedResources { - if appliedResource.Name == cm1 { - return fmt.Errorf("found applied resource name cm1") - } - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Ensure the configmap is kept and tracked by anotherappliedmanifestwork - gomega.Eventually(func() error { - configMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get( - context.Background(), cm1, metav1.GetOptions{}) - if err != nil { - return err - } - - if currentUID != configMap.UID { - return fmt.Errorf("UID should be equal") - } - - anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get( - context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return err - } - - hasAppliedResourceName := false - hasAppliedResourceUID := false - for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { - if appliedResource.Name == cm1 { - hasAppliedResourceName = true - } - - if appliedResource.UID != string(currentUID) { - hasAppliedResourceUID = true - } - } - - if !hasAppliedResourceName { - return fmt.Errorf("resource Name should be cm1") - } - - if !hasAppliedResourceUID { - return fmt.Errorf("UID should be equal") - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - }) - }) -}) diff --git a/test/integration/cloudevents/manifestworkreplicaset_test.go b/test/integration/cloudevents/manifestworkreplicaset_test.go deleted file mode 100644 index 970823f23..000000000 --- a/test/integration/cloudevents/manifestworkreplicaset_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package cloudevents - -import ( - "context" - "fmt" - "time" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - "github.com/openshift/library-go/pkg/controller/controllercmd" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilrand "k8s.io/apimachinery/pkg/util/rand" - - clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" - clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" - workapiv1 "open-cluster-management.io/api/work/v1" - workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1" - - commonoptions "open-cluster-management.io/ocm/pkg/common/options" - "open-cluster-management.io/ocm/pkg/work/hub" - "open-cluster-management.io/ocm/pkg/work/spoke" - "open-cluster-management.io/ocm/test/integration/util" -) - -const mwrsTestCM = "mwrs-test-cm" - -var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() { - var err error - var cancel context.CancelFunc - - var clusterAName, clusterBName string - var namespace string - var placement *clusterv1beta1.Placement - var placementDecision *clusterv1beta1.PlacementDecision - var manifestWorkReplicaSet *workapiv1alpha1.ManifestWorkReplicaSet - - ginkgo.BeforeEach(func() { - var ctx context.Context - ctx, cancel = context.WithCancel(context.Background()) - - namespace = utilrand.String(5) - ns := &corev1.Namespace{} - ns.Name = namespace - _, err = spokeKubeClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - clusterAName = "cluster-" + utilrand.String(5) - clusterNS := 
&corev1.Namespace{} - clusterNS.Name = clusterAName - _, err = spokeKubeClient.CoreV1().Namespaces().Create(ctx, clusterNS, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - clusterBName = "cluster-" + utilrand.String(5) - clusterNS = &corev1.Namespace{} - clusterNS.Name = clusterBName - _, err = spokeKubeClient.CoreV1().Namespaces().Create(ctx, clusterNS, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - placement = &clusterv1beta1.Placement{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-placement", - Namespace: namespace, - }, - } - _, err = hubClusterClient.ClusterV1beta1().Placements(namespace).Create(ctx, placement, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - placementDecision = &clusterv1beta1.PlacementDecision{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-placement-decision", - Namespace: namespace, - Labels: map[string]string{ - clusterv1beta1.PlacementLabel: placement.Name, - clusterv1beta1.DecisionGroupIndexLabel: "0", - }, - }, - } - decision, err := hubClusterClient.ClusterV1beta1().PlacementDecisions(namespace).Create(ctx, placementDecision, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - decision.Status.Decisions = []clusterv1beta1.ClusterDecision{ - {ClusterName: clusterAName}, - {ClusterName: clusterBName}, - } - _, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(namespace).UpdateStatus(ctx, decision, metav1.UpdateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - <-time.After(time.Second) - - startCtrl(ctx) - - // start work agents - startAgent(ctx, clusterAName) - startAgent(ctx, clusterBName) - - manifestWorkReplicaSet = &workapiv1alpha1.ManifestWorkReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-work", - Namespace: namespace, - }, - Spec: workapiv1alpha1.ManifestWorkReplicaSetSpec{ - ManifestWorkTemplate: workapiv1.ManifestWorkSpec{ - Workload: workapiv1.ManifestsTemplate{ - Manifests: []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap("default", mwrsTestCM, map[string]string{"a": "b"}, nil)), - }, - }, - }, - PlacementRefs: []workapiv1alpha1.LocalPlacementReference{ - { - Name: placement.Name, - RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All}, - }, - }, - }, - } - _, err = hubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(namespace).Create(context.TODO(), manifestWorkReplicaSet, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - }) - - ginkgo.AfterEach(func() { - err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), namespace, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - if cancel != nil { - cancel() - } - }) - - ginkgo.Context("Create/Update/Delete a manifestWorkReplicaSet", func() { - ginkgo.It("should create/update/delete successfully", func() { - gomega.Eventually(func() error { - return assertSummary(workapiv1alpha1.ManifestWorkReplicaSetSummary{Total: 2, Available: 2, Applied: 2}, manifestWorkReplicaSet) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) - - ginkgo.By("Update decision so manifestworks should be updated") - decision, err := hubClusterClient.ClusterV1beta1().PlacementDecisions(namespace).Get(context.TODO(), placementDecision.Name, metav1.GetOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - decision.Status.Decisions = decision.Status.Decisions[:1] - _, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(namespace).UpdateStatus(context.TODO(), 
decision, metav1.UpdateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - gomega.Eventually(func() error { - return assertSummary(workapiv1alpha1.ManifestWorkReplicaSetSummary{Total: 1, Available: 1, Applied: 1}, manifestWorkReplicaSet) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) - - ginkgo.By("Delete manifestworkreplicaset") - err = hubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(namespace).Delete(context.TODO(), manifestWorkReplicaSet.Name, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - gomega.Eventually(func() error { - _, err := hubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(namespace).Get(context.TODO(), manifestWorkReplicaSet.Name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return nil - } - - return fmt.Errorf("the mwrs is not deleted, %v", err) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) - }) - }) -}) - -func startAgent(ctx context.Context, clusterName string) { - o := spoke.NewWorkloadAgentOptions() - o.StatusSyncInterval = 3 * time.Second - o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second - o.WorkloadSourceDriver = workSourceDriver - o.WorkloadSourceConfig = mwrsConfigFileName - o.CloudEventsClientID = fmt.Sprintf("%s-work-client", clusterName) - o.CloudEventsClientCodecs = []string{"manifestbundle"} - - commOptions := commonoptions.NewAgentOptions() - commOptions.SpokeClusterName = clusterName - - go runWorkAgent(ctx, o, commOptions) -} - -func startCtrl(ctx context.Context) { - opts := hub.NewWorkHubManagerOptions() - opts.WorkDriver = workSourceDriver - opts.WorkDriverConfig = mwrsConfigFileName - opts.CloudEventsClientID = "mwrsctrl-client" - hubConfig := hub.NewWorkHubManagerConfig(opts) - - // start hub controller - go func() { - err := hubConfig.RunWorkHubManager(ctx, &controllercmd.ControllerContext{ - KubeConfig: hubRestConfig, - EventRecorder: util.NewIntegrationTestEventRecorder("mwrsctrl"), - }) - fmt.Println(err) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - -} - -func assertSummary(summary workapiv1alpha1.ManifestWorkReplicaSetSummary, mwrs *workapiv1alpha1.ManifestWorkReplicaSet) error { - rs, err := hubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(mwrs.Namespace).Get(context.TODO(), mwrs.Name, metav1.GetOptions{}) - - if err != nil { - return err - } - - if rs.Status.Summary != summary { - return fmt.Errorf("unexpected summary expected: %v, got :%v", summary, rs.Status.Summary) - } - - return nil -} diff --git a/test/integration/cloudevents/source/codec.go b/test/integration/cloudevents/source/codec.go deleted file mode 100644 index 3ace7081e..000000000 --- a/test/integration/cloudevents/source/codec.go +++ /dev/null @@ -1,221 +0,0 @@ -package source - -import ( - "fmt" - "strconv" - - cloudevents "github.com/cloudevents/sdk-go/v2" - cloudeventstypes "github.com/cloudevents/sdk-go/v2/types" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - kubetypes "k8s.io/apimachinery/pkg/types" - - workv1 "open-cluster-management.io/api/work/v1" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" - "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" -) - -type ManifestCodec struct{} - -func (c *ManifestCodec) EventDataType() types.CloudEventsDataType { - return payload.ManifestEventDataType -} - -func (d *ManifestCodec) Encode(source string, eventType types.CloudEventsType, work *workv1.ManifestWork) (*cloudevents.Event, 
error) { - if eventType.CloudEventsDataType != payload.ManifestEventDataType { - return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) - } - - if len(work.Spec.Workload.Manifests) != 1 { - return nil, fmt.Errorf("too many manifests in the work") - } - - eventBuilder := types.NewEventBuilder(source, eventType). - WithResourceID(string(work.UID)). - WithResourceVersion(work.Generation). - WithClusterName(work.Namespace) - - if !work.GetDeletionTimestamp().IsZero() { - evt := eventBuilder.WithDeletionTimestamp(work.GetDeletionTimestamp().Time).NewEvent() - return &evt, nil - } - - evt := eventBuilder.NewEvent() - - manifest := work.Spec.Workload.Manifests[0] - unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&manifest) - if err != nil { - return nil, fmt.Errorf("failed to convert manifest to unstructured object: %v", err) - } - - evtPayload := &payload.Manifest{ - Manifest: unstructured.Unstructured{Object: unstructuredObj}, - DeleteOption: work.Spec.DeleteOption, - } - - if len(work.Spec.ManifestConfigs) == 1 { - evtPayload.ConfigOption = &payload.ManifestConfigOption{ - FeedbackRules: work.Spec.ManifestConfigs[0].FeedbackRules, - UpdateStrategy: work.Spec.ManifestConfigs[0].UpdateStrategy, - } - } - - if err := evt.SetData(cloudevents.ApplicationJSON, evtPayload); err != nil { - return nil, fmt.Errorf("failed to encode manifests to cloud event: %v", err) - } - - return &evt, nil -} - -func (c *ManifestCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWork, error) { - eventType, err := types.ParseCloudEventsType(evt.Type()) - if err != nil { - return nil, fmt.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) - } - - if eventType.CloudEventsDataType != payload.ManifestEventDataType { - return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) - } - - evtExtensions := evt.Context.GetExtensions() - - resourceID, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceID]) - if err != nil { - return nil, fmt.Errorf("failed to get resourceid extension: %v", err) - } - - resourceVersion, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceVersion]) - if err != nil { - return nil, fmt.Errorf("failed to get resourceversion extension: %v", err) - } - - resourceVersionInt, err := strconv.ParseInt(resourceVersion, 10, 64) - if err != nil { - return nil, fmt.Errorf("failed to convert resourceversion - %v to int64", resourceVersion) - } - - clusterName, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionClusterName]) - if err != nil { - return nil, fmt.Errorf("failed to get clustername extension: %v", err) - } - - manifestStatus := &payload.ManifestStatus{} - if err := evt.DataAs(manifestStatus); err != nil { - return nil, fmt.Errorf("failed to unmarshal event data %s, %v", string(evt.Data()), err) - } - - work := &workv1.ManifestWork{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{ - UID: kubetypes.UID(resourceID), - ResourceVersion: resourceVersion, - Generation: resourceVersionInt, - Namespace: clusterName, - }, - Status: workv1.ManifestWorkStatus{ - Conditions: manifestStatus.Conditions, - ResourceStatus: workv1.ManifestResourceStatus{ - Manifests: []workv1.ManifestCondition{ - { - Conditions: manifestStatus.Status.Conditions, - StatusFeedbacks: manifestStatus.Status.StatusFeedbacks, - ResourceMeta: manifestStatus.Status.ResourceMeta, - }, - }, - }, - }, - } - - return work, nil -} - -type ManifestBundleCodec struct{} - 
-func (c *ManifestBundleCodec) EventDataType() types.CloudEventsDataType { - return payload.ManifestBundleEventDataType -} - -func (d *ManifestBundleCodec) Encode(source string, eventType types.CloudEventsType, work *workv1.ManifestWork) (*cloudevents.Event, error) { - if eventType.CloudEventsDataType != payload.ManifestBundleEventDataType { - return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) - } - - eventBuilder := types.NewEventBuilder(source, eventType). - WithResourceID(string(work.UID)). - WithResourceVersion(work.Generation). - WithClusterName(work.Namespace) - - if !work.GetDeletionTimestamp().IsZero() { - evt := eventBuilder.WithDeletionTimestamp(work.GetDeletionTimestamp().Time).NewEvent() - return &evt, nil - } - - evt := eventBuilder.NewEvent() - data := &payload.ManifestBundle{} - data.Manifests = work.Spec.Workload.Manifests - data.ManifestConfigs = work.Spec.ManifestConfigs - data.DeleteOption = work.Spec.DeleteOption - - if err := evt.SetData(cloudevents.ApplicationJSON, data); err != nil { - return nil, fmt.Errorf("failed to encode manifests to cloud event: %v", err) - } - - return &evt, nil -} - -func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWork, error) { - eventType, err := types.ParseCloudEventsType(evt.Type()) - if err != nil { - return nil, fmt.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) - } - - if eventType.CloudEventsDataType != payload.ManifestBundleEventDataType { - return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) - } - - evtExtensions := evt.Context.GetExtensions() - - resourceID, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceID]) - if err != nil { - return nil, fmt.Errorf("failed to get resourceid extension: %v", err) - } - - resourceVersion, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceVersion]) - if err != nil { - return nil, fmt.Errorf("failed to get resourceversion extension: %v", err) - } - - resourceVersionInt, err := strconv.ParseInt(resourceVersion, 10, 64) - if err != nil { - return nil, fmt.Errorf("failed to convert resourceversion - %v to int64", resourceVersion) - } - - clusterName, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionClusterName]) - if err != nil { - return nil, fmt.Errorf("failed to get clustername extension: %v", err) - } - - manifestStatus := &payload.ManifestBundleStatus{} - if err := evt.DataAs(manifestStatus); err != nil { - return nil, fmt.Errorf("failed to unmarshal event data %s, %v", string(evt.Data()), err) - } - - work := &workv1.ManifestWork{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{ - UID: kubetypes.UID(resourceID), - ResourceVersion: resourceVersion, - Generation: resourceVersionInt, - Namespace: clusterName, - }, - Status: workv1.ManifestWorkStatus{ - Conditions: manifestStatus.Conditions, - ResourceStatus: workv1.ManifestResourceStatus{ - Manifests: manifestStatus.ResourceStatus, - }, - }, - } - - return work, nil -} diff --git a/test/integration/cloudevents/source/handler.go b/test/integration/cloudevents/source/handler.go deleted file mode 100644 index dfdac0636..000000000 --- a/test/integration/cloudevents/source/handler.go +++ /dev/null @@ -1,76 +0,0 @@ -package source - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/klog/v2" - - workv1lister 
"open-cluster-management.io/api/client/work/listers/work/v1" - workv1 "open-cluster-management.io/api/work/v1" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" -) - -const ManifestWorkFinalizer = "cluster.open-cluster-management.io/manifest-work-cleanup" - -func newManifestWorkStatusHandler(lister workv1lister.ManifestWorkLister, watcher *ManifestWorkWatcher) generic.ResourceHandler[*workv1.ManifestWork] { - return func(action types.ResourceAction, work *workv1.ManifestWork) error { - switch action { - case types.StatusModified: - works, err := lister.ManifestWorks(work.Namespace).List(labels.Everything()) - if err != nil { - return err - } - - var lastWork *workv1.ManifestWork - for _, w := range works { - if w.UID == work.UID { - lastWork = w - break - } - } - - if lastWork == nil { - return fmt.Errorf("failed to find last work with id %s", work.UID) - } - - if work.Generation < lastWork.Generation { - klog.Infof("The work %s generation %d is less than cached generation %d, ignore", - work.UID, work.Generation, lastWork.Generation) - return nil - } - - // no status change - if equality.Semantic.DeepEqual(lastWork.Status, work.Status) { - return nil - } - - // restore the fields that are maintained by local agent - work.Name = lastWork.Name - work.Namespace = lastWork.Namespace - work.Labels = lastWork.Labels - work.Annotations = lastWork.Annotations - work.DeletionTimestamp = lastWork.DeletionTimestamp - work.Spec = lastWork.Spec - - if meta.IsStatusConditionTrue(work.Status.Conditions, ManifestsDeleted) { - work.Finalizers = []string{} - klog.Infof("delete work %s/%s in the source", work.Namespace, work.Name) - watcher.Receive(watch.Event{Type: watch.Deleted, Object: work}) - return nil - } - - // the work is handled by agent, we make sure the finalizer here - work.Finalizers = []string{ManifestWorkFinalizer} - watcher.Receive(watch.Event{Type: watch.Modified, Object: work}) - default: - return fmt.Errorf("unsupported resource action %s", action) - } - - return nil - } -} diff --git a/test/integration/cloudevents/source/lister.go b/test/integration/cloudevents/source/lister.go deleted file mode 100644 index 578c30de7..000000000 --- a/test/integration/cloudevents/source/lister.go +++ /dev/null @@ -1,17 +0,0 @@ -package source - -import ( - "k8s.io/apimachinery/pkg/labels" - - workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" - workv1 "open-cluster-management.io/api/work/v1" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" -) - -type manifestWorkLister struct { - Lister workv1lister.ManifestWorkLister -} - -func (l *manifestWorkLister) List(options types.ListOptions) ([]*workv1.ManifestWork, error) { - return l.Lister.ManifestWorks(options.ClusterName).List(labels.Everything()) -} diff --git a/test/integration/cloudevents/source/manifestwork.go b/test/integration/cloudevents/source/manifestwork.go deleted file mode 100644 index e0bb91cbb..000000000 --- a/test/integration/cloudevents/source/manifestwork.go +++ /dev/null @@ -1,194 +0,0 @@ -package source - -import ( - "context" - "fmt" - - "github.com/google/uuid" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - kubetypes "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/klog/v2" - - workv1client 
"open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" - workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" - workv1 "open-cluster-management.io/api/work/v1" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" - "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" -) - -const ManifestsDeleted = "Deleted" - -const ( - UpdateRequestAction = "update_request" - DeleteRequestAction = "delete_request" -) - -type manifestWorkSourceClient struct { - cloudEventsClient *generic.CloudEventSourceClient[*workv1.ManifestWork] - watcher *ManifestWorkWatcher - lister workv1lister.ManifestWorkLister - namespace string -} - -var manifestWorkGR = schema.GroupResource{Group: workv1.GroupName, Resource: "manifestworks"} - -var _ workv1client.ManifestWorkInterface = &manifestWorkSourceClient{} - -func newManifestWorkSourceClient(cloudEventsClient *generic.CloudEventSourceClient[*workv1.ManifestWork], - watcher *ManifestWorkWatcher) *manifestWorkSourceClient { - return &manifestWorkSourceClient{ - cloudEventsClient: cloudEventsClient, - watcher: watcher, - } -} - -func (c *manifestWorkSourceClient) SetNamespace(namespace string) *manifestWorkSourceClient { - c.namespace = namespace - return c -} - -func (c *manifestWorkSourceClient) SetLister(lister workv1lister.ManifestWorkLister) { - c.lister = lister -} - -func (c *manifestWorkSourceClient) Create(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.CreateOptions) (*workv1.ManifestWork, error) { - if manifestWork.Name == "" { - manifestWork.Name = manifestWork.GenerateName + rand.String(5) - } - - klog.Infof("create manifestwork %s/%s", c.namespace, manifestWork.Name) - _, err := c.lister.ManifestWorks(c.namespace).Get(manifestWork.Name) - if errors.IsNotFound(err) { - newObj := manifestWork.DeepCopy() - newObj.UID = kubetypes.UID(uuid.New().String()) - newObj.ResourceVersion = "1" - newObj.Generation = 1 - newObj.Namespace = c.namespace - - eventType := types.CloudEventsType{ - CloudEventsDataType: payload.ManifestEventDataType, - SubResource: types.SubResourceSpec, - Action: "create_request", - } - - if len(manifestWork.Spec.Workload.Manifests) > 1 { - eventType.CloudEventsDataType = payload.ManifestBundleEventDataType - } - - if err := c.cloudEventsClient.Publish(ctx, eventType, newObj); err != nil { - return nil, err - } - - // refresh cache - c.watcher.Receive(watch.Event{Type: watch.Added, Object: newObj}) - return newObj, nil - } - - if err != nil { - return nil, err - } - - return nil, errors.NewAlreadyExists(manifestWorkGR, manifestWork.Name) -} - -func (c *manifestWorkSourceClient) Update(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { - klog.Infof("update manifestwork %s/%s", c.namespace, manifestWork.Name) - lastWork, err := c.lister.ManifestWorks(c.namespace).Get(manifestWork.Name) - if err != nil { - return nil, err - } - - if equality.Semantic.DeepEqual(lastWork.Spec, manifestWork.Spec) { - return manifestWork, nil - } - - updatedObj := manifestWork.DeepCopy() - updatedObj.Generation = updatedObj.Generation + 1 - updatedObj.ResourceVersion = fmt.Sprintf("%d", updatedObj.Generation) - - eventType := types.CloudEventsType{ - CloudEventsDataType: payload.ManifestEventDataType, - SubResource: types.SubResourceSpec, - Action: "update_request", - } - - if len(manifestWork.Spec.Workload.Manifests) > 1 { - eventType.CloudEventsDataType = 
payload.ManifestBundleEventDataType - } - - if err := c.cloudEventsClient.Publish(ctx, eventType, updatedObj); err != nil { - return nil, err - } - - // refresh cache - c.watcher.Receive(watch.Event{Type: watch.Modified, Object: updatedObj}) - - return updatedObj, nil -} - -func (c *manifestWorkSourceClient) UpdateStatus(ctx context.Context, - manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { - return nil, errors.NewMethodNotSupported(manifestWorkGR, "updatestatus") -} - -func (c *manifestWorkSourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - klog.Infof("delete manifestwork %s/%s", c.namespace, name) - manifestWork, err := c.lister.ManifestWorks(c.namespace).Get(name) - if err != nil { - return err - } - - // actual deletion should be done after hub receive delete status - deletedObj := manifestWork.DeepCopy() - now := metav1.Now() - deletedObj.DeletionTimestamp = &now - - eventType := types.CloudEventsType{ - CloudEventsDataType: payload.ManifestEventDataType, - SubResource: types.SubResourceSpec, - Action: "delete_request", - } - - if len(manifestWork.Spec.Workload.Manifests) > 1 { - eventType.CloudEventsDataType = payload.ManifestBundleEventDataType - } - - if err := c.cloudEventsClient.Publish(ctx, eventType, deletedObj); err != nil { - return err - } - - // refresh cache - c.watcher.Receive(watch.Event{Type: watch.Modified, Object: deletedObj}) - return nil -} - -func (c *manifestWorkSourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - return errors.NewMethodNotSupported(manifestWorkGR, "deletecollection") -} - -func (c *manifestWorkSourceClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*workv1.ManifestWork, error) { - work, err := c.lister.ManifestWorks(c.namespace).Get(name) - if err != nil { - return nil, err - } - return work.DeepCopy(), nil -} - -func (c *manifestWorkSourceClient) List(ctx context.Context, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) { - return &workv1.ManifestWorkList{}, nil -} - -func (c *manifestWorkSourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.watcher, nil -} - -func (c *manifestWorkSourceClient) Patch(ctx context.Context, name string, pt kubetypes.PatchType, data []byte, - opts metav1.PatchOptions, subresources ...string) (result *workv1.ManifestWork, err error) { - return nil, errors.NewMethodNotSupported(manifestWorkGR, "patch") -} diff --git a/test/integration/cloudevents/source/source.go b/test/integration/cloudevents/source/source.go deleted file mode 100644 index 12ddcecc6..000000000 --- a/test/integration/cloudevents/source/source.go +++ /dev/null @@ -1,136 +0,0 @@ -package source - -import ( - "context" - "fmt" - "log" - "os" - "time" - - "github.com/ghodss/yaml" - mochimqtt "github.com/mochi-mqtt/server/v2" - "github.com/mochi-mqtt/server/v2/hooks/auth" - "github.com/mochi-mqtt/server/v2/listeners" - - workclientset "open-cluster-management.io/api/client/work/clientset/versioned" - workinformers "open-cluster-management.io/api/client/work/informers/externalversions" - workv1 "open-cluster-management.io/api/work/v1" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" - "open-cluster-management.io/sdk-go/pkg/cloudevents/work" -) - -const ( - sourceID = 
"cloudevents-mqtt-integration-test" - mqttBrokerHost = "127.0.0.1:1883" -) - -var mqttBroker *mochimqtt.Server - -type Source interface { - Host() string - Start(ctx context.Context) error - Stop() error - Workclientset() workclientset.Interface -} - -type MQTTSource struct { - configFile string - workClientSet workclientset.Interface -} - -func NewMQTTSource(configFile string) *MQTTSource { - return &MQTTSource{ - configFile: configFile, - } -} - -func (m *MQTTSource) Host() string { - return mqttBrokerHost -} - -func (m *MQTTSource) Start(ctx context.Context) error { - // start a MQTT broker - mqttBroker = mochimqtt.New(nil) - - // allow all connections - if err := mqttBroker.AddHook(new(auth.AllowHook), nil); err != nil { - return err - } - - if err := mqttBroker.AddListener(listeners.NewTCP( - listeners.Config{ - ID: "mqtt-test-broker", - Address: mqttBrokerHost, - })); err != nil { - return err - } - - go func() { - if err := mqttBroker.Serve(); err != nil { - log.Fatal(err) - } - }() - - // write the mqtt broker config to a file - config := mqtt.MQTTConfig{ - BrokerHost: mqttBrokerHost, - Topics: &types.Topics{ - SourceEvents: fmt.Sprintf("sources/%s/clusters/+/sourceevents", sourceID), - AgentEvents: fmt.Sprintf("sources/%s/clusters/+/agentevents", sourceID), - }, - } - - configData, err := yaml.Marshal(config) - if err != nil { - return err - } - if err := os.WriteFile(m.configFile, configData, 0600); err != nil { - return err - } - - // build a source client - workLister := &manifestWorkLister{} - watcher := NewManifestWorkWatcher() - mqttOptions, err := mqtt.BuildMQTTOptionsFromFlags(m.configFile) - if err != nil { - return err - } - cloudEventsClient, err := generic.NewCloudEventSourceClient[*workv1.ManifestWork]( - ctx, - mqtt.NewSourceOptions(mqttOptions, fmt.Sprintf("%s-client", sourceID), sourceID), - workLister, - work.ManifestWorkStatusHash, - &ManifestCodec{}, - &ManifestBundleCodec{}, - ) - if err != nil { - return err - } - - manifestWorkClient := newManifestWorkSourceClient(cloudEventsClient, watcher) - workClient := &workV1ClientWrapper{ManifestWorkClient: manifestWorkClient} - workClientSet := &workClientSetWrapper{WorkV1ClientWrapper: workClient} - factory := workinformers.NewSharedInformerFactoryWithOptions(workClientSet, 1*time.Hour) - informers := factory.Work().V1().ManifestWorks() - manifestWorkLister := informers.Lister() - workLister.Lister = manifestWorkLister - manifestWorkClient.SetLister(manifestWorkLister) - - // start the source client - cloudEventsClient.Subscribe(ctx, newManifestWorkStatusHandler(manifestWorkLister, watcher)) - m.workClientSet = workClientSet - - go informers.Informer().Run(ctx.Done()) - - return nil -} - -func (m *MQTTSource) Stop() error { - return mqttBroker.Close() -} - -func (m *MQTTSource) Workclientset() workclientset.Interface { - return m.workClientSet -} diff --git a/test/integration/cloudevents/source/watcher.go b/test/integration/cloudevents/source/watcher.go deleted file mode 100644 index 913c52bb4..000000000 --- a/test/integration/cloudevents/source/watcher.go +++ /dev/null @@ -1,64 +0,0 @@ -package source - -import ( - "sync" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/klog/v2" -) - -// ManifestWorkWatcher implements the watch.Interface. It returns a chan which will receive all the events. 
-type ManifestWorkWatcher struct { - sync.Mutex - - result chan watch.Event - done chan struct{} -} - -var _ watch.Interface = &ManifestWorkWatcher{} - -func NewManifestWorkWatcher() *ManifestWorkWatcher { - mw := &ManifestWorkWatcher{ - // It's easy for a consumer to add buffering via an extra - // goroutine/channel, but impossible for them to remove it, - // so nonbuffered is better. - result: make(chan watch.Event), - // If the watcher is externally stopped there is no receiver anymore - // and the send operations on the result channel, especially the - // error reporting might block forever. - // Therefore a dedicated stop channel is used to resolve this blocking. - done: make(chan struct{}), - } - - return mw -} - -// ResultChan implements Interface. -func (mw *ManifestWorkWatcher) ResultChan() <-chan watch.Event { - return mw.result -} - -// Stop implements Interface. -func (mw *ManifestWorkWatcher) Stop() { - // Call Close() exactly once by locking and setting a flag. - mw.Lock() - defer mw.Unlock() - // closing a closed channel always panics, therefore check before closing - select { - case <-mw.done: - close(mw.result) - default: - close(mw.done) - } -} - -// Receive a event from the work client and sends down the result channel. -func (mw *ManifestWorkWatcher) Receive(evt watch.Event) { - if klog.V(4).Enabled() { - obj, _ := meta.Accessor(evt.Object) - klog.V(4).Infof("Receive the event %v for %v", evt.Type, obj.GetName()) - } - - mw.result <- evt -} diff --git a/test/integration/cloudevents/source/workclientset.go b/test/integration/cloudevents/source/workclientset.go deleted file mode 100644 index 68f1aa8d0..000000000 --- a/test/integration/cloudevents/source/workclientset.go +++ /dev/null @@ -1,46 +0,0 @@ -package source - -import ( - discovery "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" - - workclientset "open-cluster-management.io/api/client/work/clientset/versioned" - workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" - workv1alpha1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1alpha1" -) - -type workClientSetWrapper struct { - WorkV1ClientWrapper *workV1ClientWrapper -} - -var _ workclientset.Interface = &workClientSetWrapper{} - -func (c *workClientSetWrapper) WorkV1() workv1client.WorkV1Interface { - return c.WorkV1ClientWrapper -} - -func (c *workClientSetWrapper) WorkV1alpha1() workv1alpha1client.WorkV1alpha1Interface { - return nil -} - -func (c *workClientSetWrapper) Discovery() discovery.DiscoveryInterface { - return nil -} - -type workV1ClientWrapper struct { - ManifestWorkClient *manifestWorkSourceClient -} - -var _ workv1client.WorkV1Interface = &workV1ClientWrapper{} - -func (c *workV1ClientWrapper) ManifestWorks(namespace string) workv1client.ManifestWorkInterface { - return c.ManifestWorkClient.SetNamespace(namespace) -} - -func (c *workV1ClientWrapper) AppliedManifestWorks() workv1client.AppliedManifestWorkInterface { - return nil -} - -func (c *workV1ClientWrapper) RESTClient() rest.Interface { - return nil -} diff --git a/test/integration/cloudevents/statusfeedback_test.go b/test/integration/cloudevents/statusfeedback_test.go deleted file mode 100644 index 535fd3234..000000000 --- a/test/integration/cloudevents/statusfeedback_test.go +++ /dev/null @@ -1,726 +0,0 @@ -package cloudevents - -import ( - "context" - "fmt" - "time" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apiequality 
"k8s.io/apimachinery/pkg/api/equality" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilrand "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/utils/ptr" - - ocmfeature "open-cluster-management.io/api/feature" - workapiv1 "open-cluster-management.io/api/work/v1" - - commonoptions "open-cluster-management.io/ocm/pkg/common/options" - "open-cluster-management.io/ocm/pkg/features" - "open-cluster-management.io/ocm/pkg/work/spoke" - "open-cluster-management.io/ocm/test/integration/util" -) - -var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { - var o *spoke.WorkloadAgentOptions - var commOptions *commonoptions.AgentOptions - var cancel context.CancelFunc - - var work *workapiv1.ManifestWork - var manifests []workapiv1.Manifest - - var err error - - ginkgo.BeforeEach(func() { - clusterName := utilrand.String(5) - - ns := &corev1.Namespace{} - ns.Name = clusterName - _, err = spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - o = spoke.NewWorkloadAgentOptions() - o.StatusSyncInterval = 3 * time.Second - o.WorkloadSourceDriver = workSourceDriver - o.WorkloadSourceConfig = workSourceConfigFileName - o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName) - o.CloudEventsClientCodecs = []string{"manifest", "manifestbundle"} - - commOptions = commonoptions.NewAgentOptions() - commOptions.SpokeClusterName = clusterName - - // reset manifests - manifests = nil - }) - - ginkgo.JustBeforeEach(func() { - work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - }) - - ginkgo.AfterEach(func() { - err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - }) - - ginkgo.Context("Deployment Status feedback", func() { - ginkgo.BeforeEach(func() { - u, _, err := util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa") - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - manifests = append(manifests, util.ToManifest(u)) - - var ctx context.Context - ctx, cancel = context.WithCancel(context.Background()) - go runWorkAgent(ctx, o, commOptions) - }) - - ginkgo.AfterEach(func() { - if cancel != nil { - cancel() - } - }) - - ginkgo.It("should return well known statuses", func() { - work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ - { - ResourceIdentifier: workapiv1.ResourceIdentifier{ - Group: "apps", - Resource: "deployments", - Namespace: commOptions.SpokeClusterName, - Name: "deploy1", - }, - FeedbackRules: []workapiv1.FeedbackRule{ - { - Type: workapiv1.WellKnownStatusType, - }, - }, - }, - } - - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - // Update Deployment status on spoke - gomega.Eventually(func() error { - deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), 
"deploy1", metav1.GetOptions{}) - if err != nil { - return err - } - - deploy.Status.AvailableReplicas = 2 - deploy.Status.Replicas = 3 - deploy.Status.ReadyReplicas = 2 - - _, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Check if we get status of deployment on work api - gomega.Eventually(func() error { - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - if len(work.Status.ResourceStatus.Manifests) != 1 { - return fmt.Errorf("the size of resource status is not correct, expect to be 1 but got %d", len(work.Status.ResourceStatus.Manifests)) - } - - values := work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values - - expectedValues := []workapiv1.FeedbackValue{ - { - Name: "ReadyReplicas", - Value: workapiv1.FieldValue{ - Type: workapiv1.Integer, - Integer: ptr.To[int64](2), - }, - }, - { - Name: "Replicas", - Value: workapiv1.FieldValue{ - Type: workapiv1.Integer, - Integer: ptr.To[int64](3), - }, - }, - { - Name: "AvailableReplicas", - Value: workapiv1.FieldValue{ - Type: workapiv1.Integer, - Integer: ptr.To[int64](2), - }, - }, - } - if !apiequality.Semantic.DeepEqual(values, expectedValues) { - return fmt.Errorf("status feedback values are not correct, we got %v", values) - } - - if !util.HaveManifestCondition(work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", []metav1.ConditionStatus{metav1.ConditionTrue}) { - return fmt.Errorf("status sync condition should be True") - } - - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Update replica of deployment - gomega.Eventually(func() error { - deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) - if err != nil { - return err - } - - deploy.Status.AvailableReplicas = 3 - deploy.Status.Replicas = 3 - deploy.Status.ReadyReplicas = 3 - - _, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Check if the status of deployment is synced on work api - gomega.Eventually(func() error { - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - if len(work.Status.ResourceStatus.Manifests) != 1 { - return fmt.Errorf("the size of resource status is not correct, expect to be 1 but got %d", len(work.Status.ResourceStatus.Manifests)) - } - - values := work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values - - expectedValues := []workapiv1.FeedbackValue{ - { - Name: "ReadyReplicas", - Value: workapiv1.FieldValue{ - Type: workapiv1.Integer, - Integer: ptr.To[int64](3), - }, - }, - { - Name: "Replicas", - Value: workapiv1.FieldValue{ - Type: workapiv1.Integer, - Integer: ptr.To[int64](3), - }, - }, - { - Name: "AvailableReplicas", - Value: workapiv1.FieldValue{ - Type: workapiv1.Integer, - Integer: ptr.To[int64](3), - }, - }, - } - if !apiequality.Semantic.DeepEqual(values, expectedValues) { - return fmt.Errorf("status feedback values are not correct, we got %v", values) - } - - 
if !util.HaveManifestCondition(work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", []metav1.ConditionStatus{metav1.ConditionTrue}) { - return fmt.Errorf("status sync condition should be True") - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - }) - - ginkgo.It("should return statuses by JSONPaths", func() { - work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ - { - ResourceIdentifier: workapiv1.ResourceIdentifier{ - Group: "apps", - Resource: "deployments", - Namespace: commOptions.SpokeClusterName, - Name: "deploy1", - }, - FeedbackRules: []workapiv1.FeedbackRule{ - { - Type: workapiv1.JSONPathsType, - JsonPaths: []workapiv1.JsonPath{ - { - Name: "Available", - Path: ".status.conditions[?(@.type==\"Available\")].status", - }, - { - Name: "wrong json path", - Path: ".status.conditions", - }, - }, - }, - }, - }, - } - - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - gomega.Eventually(func() error { - deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) - if err != nil { - return err - } - - deploy.Status.Conditions = []appsv1.DeploymentCondition{ - { - Type: "Available", - Status: "True", - }, - } - - _, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Check if we get status of deployment on work api - gomega.Eventually(func() error { - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - if len(work.Status.ResourceStatus.Manifests) != 1 { - return fmt.Errorf("the size of resource status is not correct, expect to be 1 but got %d", len(work.Status.ResourceStatus.Manifests)) - } - - values := work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values - - expectedValues := []workapiv1.FeedbackValue{ - { - Name: "Available", - Value: workapiv1.FieldValue{ - Type: workapiv1.String, - String: ptr.To[string]("True"), - }, - }, - } - if !apiequality.Semantic.DeepEqual(values, expectedValues) { - return fmt.Errorf("status feedback values are not correct, we got %v", values) - } - - if !util.HaveManifestCondition(work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", []metav1.ConditionStatus{metav1.ConditionFalse}) { - return fmt.Errorf("status sync condition should be False") - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - }) - - ginkgo.It("should return none for resources with no wellknown status", func() { - u, _, err := util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa") - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - sa, _ := util.NewServiceAccount(commOptions.SpokeClusterName, "sa") - - 
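// Combine a Deployment (which has a well-known status) with a
// ServiceAccount (which does not); only the Deployment manifest is
// expected to report feedback values further below.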
work = util.NewManifestWork(commOptions.SpokeClusterName, "", []workapiv1.Manifest{}) - work.Spec.Workload.Manifests = []workapiv1.Manifest{ - util.ToManifest(u), - util.ToManifest(sa), - } - - work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ - { - ResourceIdentifier: workapiv1.ResourceIdentifier{ - Group: "apps", - Resource: "deployments", - Namespace: commOptions.SpokeClusterName, - Name: "deploy1", - }, - FeedbackRules: []workapiv1.FeedbackRule{ - { - Type: workapiv1.WellKnownStatusType, - }, - }, - }, - { - ResourceIdentifier: workapiv1.ResourceIdentifier{ - Group: "", - Resource: "serviceaccounts", - Namespace: commOptions.SpokeClusterName, - Name: "sa", - }, - FeedbackRules: []workapiv1.FeedbackRule{ - { - Type: workapiv1.WellKnownStatusType, - }, - }, - }, - } - - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - // Update Deployment status on spoke - gomega.Eventually(func() error { - deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) - if err != nil { - return err - } - - deploy.Status.AvailableReplicas = 2 - deploy.Status.Replicas = 3 - deploy.Status.ReadyReplicas = 2 - - _, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Check if we get status of deployment on work api - gomega.Eventually(func() error { - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - if len(work.Status.ResourceStatus.Manifests) != 2 { - return fmt.Errorf("the size of resource status is not correct, expect to be 2 but got %d", len(work.Status.ResourceStatus.Manifests)) - } - - values := work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values - - expectedValues := []workapiv1.FeedbackValue{ - { - Name: "ReadyReplicas", - Value: workapiv1.FieldValue{ - Type: workapiv1.Integer, - Integer: ptr.To[int64](2), - }, - }, - { - Name: "Replicas", - Value: workapiv1.FieldValue{ - Type: workapiv1.Integer, - Integer: ptr.To[int64](3), - }, - }, - { - Name: "AvailableReplicas", - Value: workapiv1.FieldValue{ - Type: workapiv1.Integer, - Integer: ptr.To[int64](2), - }, - }, - } - if !apiequality.Semantic.DeepEqual(values, expectedValues) { - return fmt.Errorf("status feedback values are not correct, we got %v", work.Status.ResourceStatus.Manifests) - } - - if len(work.Status.ResourceStatus.Manifests[1].StatusFeedbacks.Values) != 0 { - return fmt.Errorf("status feedback values are not correct, we got %v", work.Status.ResourceStatus.Manifests[1].StatusFeedbacks.Values) - } - - if !util.HaveManifestCondition( - work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", - 
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionFalse}) { - return fmt.Errorf("status sync condition should be True") - } - - return nil - }, eventuallyTimeout*2, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - }) - }) - - ginkgo.Context("Deployment Status feedback with RawJsonString enabled", func() { - ginkgo.BeforeEach(func() { - u, _, err := util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa") - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - manifests = append(manifests, util.ToManifest(u)) - - err = features.SpokeMutableFeatureGate.Set(fmt.Sprintf("%s=true", ocmfeature.RawFeedbackJsonString)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - var ctx context.Context - ctx, cancel = context.WithCancel(context.Background()) - go runWorkAgent(ctx, o, commOptions) - }) - - ginkgo.AfterEach(func() { - if cancel != nil { - cancel() - } - }) - - ginkgo.It("Should return raw json string if the result is a structure", func() { - work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ - { - ResourceIdentifier: workapiv1.ResourceIdentifier{ - Group: "apps", - Resource: "deployments", - Namespace: commOptions.SpokeClusterName, - Name: "deploy1", - }, - FeedbackRules: []workapiv1.FeedbackRule{ - { - Type: workapiv1.JSONPathsType, - JsonPaths: []workapiv1.JsonPath{ - { - Name: "conditions", - Path: ".status.conditions", - }, - }, - }, - }, - }, - } - - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - gomega.Eventually(func() error { - deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) - if err != nil { - return err - } - - deploy.Status.Conditions = []appsv1.DeploymentCondition{ - { - Type: "Available", - Status: "True", - }, - } - - _, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Check if we get status of deployment on work api - gomega.Eventually(func() error { - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - if len(work.Status.ResourceStatus.Manifests) != 1 { - return fmt.Errorf("the size of resource status is not correct, expect to be 1 but got %d", len(work.Status.ResourceStatus.Manifests)) - } - - values := work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values - - expectedValues := []workapiv1.FeedbackValue{ - { - Name: "conditions", - Value: workapiv1.FieldValue{ - Type: workapiv1.JsonRaw, - JsonRaw: ptr.To[string](`[{"lastTransitionTime":null,"lastUpdateTime":null,"status":"True","type":"Available"}]`), - }, - }, - } - if !apiequality.Semantic.DeepEqual(values, expectedValues) { - if len(values) > 0 { - return fmt.Errorf("status feedback values are not correct, we 
got %v", *values[0].Value.JsonRaw) - } - return fmt.Errorf("status feedback values are not correct, we got %v", values) - } - - if !util.HaveManifestCondition(work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", []metav1.ConditionStatus{metav1.ConditionTrue}) { - return fmt.Errorf("status sync condition should be True") - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - }) - }) - - ginkgo.Context("DaemonSet Status feedback", func() { - ginkgo.BeforeEach(func() { - u, _, err := util.NewDaesonSet(commOptions.SpokeClusterName, "ds1") - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - manifests = append(manifests, util.ToManifest(u)) - - var ctx context.Context - ctx, cancel = context.WithCancel(context.Background()) - go runWorkAgent(ctx, o, commOptions) - }) - - ginkgo.AfterEach(func() { - if cancel != nil { - cancel() - } - }) - - ginkgo.It("should return well known statuses", func() { - work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ - { - ResourceIdentifier: workapiv1.ResourceIdentifier{ - Group: "apps", - Resource: "daemonsets", - Namespace: commOptions.SpokeClusterName, - Name: "ds1", - }, - FeedbackRules: []workapiv1.FeedbackRule{ - { - Type: workapiv1.WellKnownStatusType, - }, - }, - }, - } - - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName). - Create(context.Background(), work, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, - workapiv1.WorkApplied, metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue}, - eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, - workapiv1.WorkAvailable, metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue}, - eventuallyTimeout, eventuallyInterval) - - // Update DaemonSet status on spoke - gomega.Eventually(func() error { - ds, err := spokeKubeClient.AppsV1().DaemonSets(commOptions.SpokeClusterName). - Get(context.Background(), "ds1", metav1.GetOptions{}) - if err != nil { - return err - } - - ds.Status.NumberAvailable = 2 - ds.Status.DesiredNumberScheduled = 3 - ds.Status.NumberReady = 2 - - _, err = spokeKubeClient.AppsV1().DaemonSets(commOptions.SpokeClusterName). - UpdateStatus(context.Background(), ds, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Check if we get status of daemonset on work api - gomega.Eventually(func() error { - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName). 
- Get(context.Background(), work.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - if len(work.Status.ResourceStatus.Manifests) != 1 { - return fmt.Errorf("the size of resource status is not correct, expect to be 1 but got %d", - len(work.Status.ResourceStatus.Manifests)) - } - - values := work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values - - expectedValues := []workapiv1.FeedbackValue{ - { - Name: "NumberReady", - Value: workapiv1.FieldValue{ - Type: workapiv1.Integer, - Integer: ptr.To[int64](2), - }, - }, - { - Name: "DesiredNumberScheduled", - Value: workapiv1.FieldValue{ - Type: workapiv1.Integer, - Integer: ptr.To[int64](3), - }, - }, - { - Name: "NumberAvailable", - Value: workapiv1.FieldValue{ - Type: workapiv1.Integer, - Integer: ptr.To[int64](2), - }, - }, - } - if !apiequality.Semantic.DeepEqual(values, expectedValues) { - return fmt.Errorf("status feedback values are not correct, we got %v", values) - } - - if !util.HaveManifestCondition(work.Status.ResourceStatus.Manifests, - "StatusFeedbackSynced", []metav1.ConditionStatus{metav1.ConditionTrue}) { - return fmt.Errorf("status sync condition should be True") - } - - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Update replica of deployment - gomega.Eventually(func() error { - ds, err := spokeKubeClient.AppsV1().DaemonSets(commOptions.SpokeClusterName). - Get(context.Background(), "ds1", metav1.GetOptions{}) - if err != nil { - return err - } - - ds.Status.NumberAvailable = 3 - ds.Status.DesiredNumberScheduled = 3 - ds.Status.NumberReady = 3 - - _, err = spokeKubeClient.AppsV1().DaemonSets(commOptions.SpokeClusterName). - UpdateStatus(context.Background(), ds, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Check if the status of the daemonset is synced on work api - gomega.Eventually(func() error { - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName). 
- Get(context.Background(), work.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - if len(work.Status.ResourceStatus.Manifests) != 1 { - return fmt.Errorf("the size of resource status is not correct, expect to be 1 but got %d", - len(work.Status.ResourceStatus.Manifests)) - } - - values := work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values - - expectedValues := []workapiv1.FeedbackValue{ - { - Name: "NumberReady", - Value: workapiv1.FieldValue{ - Type: workapiv1.Integer, - Integer: ptr.To[int64](3), - }, - }, - { - Name: "DesiredNumberScheduled", - Value: workapiv1.FieldValue{ - Type: workapiv1.Integer, - Integer: ptr.To[int64](3), - }, - }, - { - Name: "NumberAvailable", - Value: workapiv1.FieldValue{ - Type: workapiv1.Integer, - Integer: ptr.To[int64](3), - }, - }, - } - if !apiequality.Semantic.DeepEqual(values, expectedValues) { - return fmt.Errorf("status feedback values are not correct, we got %v", values) - } - - if !util.HaveManifestCondition(work.Status.ResourceStatus.Manifests, - "StatusFeedbackSynced", []metav1.ConditionStatus{metav1.ConditionTrue}) { - return fmt.Errorf("status sync condition should be True") - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - }) - }) - -}) diff --git a/test/integration/cloudevents/suite_test.go b/test/integration/cloudevents/suite_test.go deleted file mode 100644 index 0626d82d3..000000000 --- a/test/integration/cloudevents/suite_test.go +++ /dev/null @@ -1,164 +0,0 @@ -package cloudevents - -import ( - "context" - "fmt" - "os" - "path" - "testing" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - "go.uber.org/zap/zapcore" - "gopkg.in/yaml.v2" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/envtest" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - - clusterclientset "open-cluster-management.io/api/client/cluster/clientset/versioned" - workclientset "open-cluster-management.io/api/client/work/clientset/versioned" - ocmfeature "open-cluster-management.io/api/feature" - workapiv1 "open-cluster-management.io/api/work/v1" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt" - "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" - - "open-cluster-management.io/ocm/pkg/features" - "open-cluster-management.io/ocm/pkg/work/helper" - "open-cluster-management.io/ocm/test/integration/cloudevents/source" -) - -const ( - eventuallyTimeout = 60 // seconds - eventuallyInterval = 1 // seconds - cm1, cm2 = "cm1", "cm2" -) - -// TODO consider to use one integration with work integration -// focus on source is a MQTT broker -const workSourceDriver = "mqtt" - -var tempDir string - -var testEnv *envtest.Environment -var envCtx context.Context -var envCancel context.CancelFunc - -var workSource source.Source -var workSourceConfigFileName string -var workSourceWorkClient workclientset.Interface -var workSourceHash string - -var mwrsConfigFileName string - -var hubRestConfig *rest.Config -var hubClusterClient clusterclientset.Interface -var hubWorkClient workclientset.Interface - -var spokeRestConfig *rest.Config -var spokeKubeClient kubernetes.Interface -var spokeWorkClient workclientset.Interface - -var CRDPaths = []string{ - // hub - "./vendor/open-cluster-management.io/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml", - 
"./vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml", - "./vendor/open-cluster-management.io/api/cluster/v1beta1/0000_02_clusters.open-cluster-management.io_placements.crd.yaml", - "./vendor/open-cluster-management.io/api/cluster/v1beta1/0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml", - // spoke - "./vendor/open-cluster-management.io/api/work/v1/0000_01_work.open-cluster-management.io_appliedmanifestworks.crd.yaml", -} - -func TestIntegration(t *testing.T) { - gomega.RegisterFailHandler(ginkgo.Fail) - ginkgo.RunSpecs(t, "Integration Suite") -} - -var _ = ginkgo.BeforeSuite(func() { - logf.SetLogger(zap.New(zap.WriteTo(ginkgo.GinkgoWriter), zap.UseDevMode(true), zap.Level(zapcore.DebugLevel))) - ginkgo.By("bootstrapping test environment") - - // start a kube-apiserver - testEnv = &envtest.Environment{ - ErrorIfCRDPathMissing: true, - CRDDirectoryPaths: CRDPaths, - } - envCtx, envCancel = context.WithCancel(context.TODO()) - cfg, err := testEnv.Start() - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(cfg).ToNot(gomega.BeNil()) - - tempDir, err = os.MkdirTemp("", "test") - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(tempDir).ToNot(gomega.BeEmpty()) - - err = workapiv1.Install(scheme.Scheme) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - features.SpokeMutableFeatureGate.Add(ocmfeature.DefaultSpokeWorkFeatureGates) - - spokeRestConfig = cfg - spokeKubeClient, err = kubernetes.NewForConfig(cfg) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - spokeWorkClient, err = workclientset.NewForConfig(cfg) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - hubRestConfig = cfg - hubClusterClient, err = clusterclientset.NewForConfig(cfg) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - hubWorkClient, err = workclientset.NewForConfig(cfg) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - switch workSourceDriver { - case "mqtt": - // create mqttconfig file for source in a tmp dir - workSourceConfigFileName = path.Join(tempDir, "mqttconfig") - - workSource = source.NewMQTTSource(workSourceConfigFileName) - err := workSource.Start(envCtx) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - workSourceHash = helper.HubHash(workSource.Host()) - - workSourceWorkClient = workSource.Workclientset() - gomega.Expect(workSourceWorkClient).ToNot(gomega.BeNil()) - - // create mqttconfig file for mwrsctrl in a tmp dir - mwrsConfigFileName = path.Join(tempDir, "mwrsctrl-mqttconfig") - config := mqtt.MQTTConfig{ - BrokerHost: workSource.Host(), - Topics: &types.Topics{ - SourceEvents: "sources/mwrsctrl/clusters/+/sourceevents", - AgentEvents: "sources/mwrsctrl/clusters/+/agentevents", - SourceBroadcast: "sources/mwrsctrl/sourcebroadcast", - }, - } - - configData, err := yaml.Marshal(config) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = os.WriteFile(mwrsConfigFileName, configData, 0600) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - default: - ginkgo.AbortSuite(fmt.Sprintf("unsupported source driver: %s", workSourceDriver)) - } -}) - -var _ = ginkgo.AfterSuite(func() { - ginkgo.By("tearing down the test environment") - - envCancel() - - err := workSource.Stop() - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - err = testEnv.Stop() - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - if tempDir != "" { - os.RemoveAll(tempDir) - } -}) diff --git a/test/integration/cloudevents/updatestrategy_test.go 
b/test/integration/cloudevents/updatestrategy_test.go deleted file mode 100644 index 0909e3e9b..000000000 --- a/test/integration/cloudevents/updatestrategy_test.go +++ /dev/null @@ -1,444 +0,0 @@ -package cloudevents - -import ( - "context" - "fmt" - "time" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/types" - utilrand "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/utils/ptr" - - workapiv1 "open-cluster-management.io/api/work/v1" - - commonoptions "open-cluster-management.io/ocm/pkg/common/options" - "open-cluster-management.io/ocm/pkg/work/spoke" - "open-cluster-management.io/ocm/test/integration/util" -) - -var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { - var err error - var cancel context.CancelFunc - - var clusterName string - - var work *workapiv1.ManifestWork - var manifests []workapiv1.Manifest - - ginkgo.BeforeEach(func() { - clusterName = utilrand.String(5) - - ns := &corev1.Namespace{} - ns.Name = clusterName - _, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - o := spoke.NewWorkloadAgentOptions() - o.StatusSyncInterval = 3 * time.Second - o.WorkloadSourceDriver = workSourceDriver - o.WorkloadSourceConfig = workSourceConfigFileName - o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName) - o.CloudEventsClientCodecs = []string{"manifest"} - - commOptions := commonoptions.NewAgentOptions() - commOptions.SpokeClusterName = clusterName - - var ctx context.Context - ctx, cancel = context.WithCancel(context.Background()) - go runWorkAgent(ctx, o, commOptions) - - // reset manifests - manifests = nil - }) - - ginkgo.JustBeforeEach(func() { - work = util.NewManifestWork(clusterName, "", manifests) - }) - - ginkgo.AfterEach(func() { - if cancel != nil { - cancel() - } - err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - }) - - ginkgo.Context("Create only strategy", func() { - var object *unstructured.Unstructured - - ginkgo.BeforeEach(func() { - object, _, err = util.NewDeployment(clusterName, "deploy1", "sa") - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - manifests = append(manifests, util.ToManifest(object)) - }) - - ginkgo.It("deployed resource should not be updated when work is updated", func() { - work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ - { - ResourceIdentifier: workapiv1.ResourceIdentifier{ - Group: "apps", - Resource: "deployments", - Namespace: clusterName, - Name: "deploy1", - }, - UpdateStrategy: &workapiv1.UpdateStrategy{ - Type: workapiv1.UpdateStrategyTypeCreateOnly, - }, - }, - } - - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - // update work - err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas") - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Eventually(func() error { - work, err = 
workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - work.Spec.Workload.Manifests[0] = util.ToManifest(object) - _, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - gomega.Eventually(func() error { - deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) - if err != nil { - return err - } - - if *deploy.Spec.Replicas != 1 { - return fmt.Errorf("replicas should not be changed") - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - }) - }) - - ginkgo.Context("Server side apply strategy", func() { - var object *unstructured.Unstructured - - ginkgo.BeforeEach(func() { - object, _, err = util.NewDeployment(clusterName, "deploy1", "sa") - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - manifests = append(manifests, util.ToManifest(object)) - }) - - ginkgo.It("deployed resource should be applied when work is updated", func() { - work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ - { - ResourceIdentifier: workapiv1.ResourceIdentifier{ - Group: "apps", - Resource: "deployments", - Namespace: clusterName, - Name: "deploy1", - }, - UpdateStrategy: &workapiv1.UpdateStrategy{ - Type: workapiv1.UpdateStrategyTypeServerSideApply, - }, - }, - } - - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - // update work - err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas") - gomega.Eventually(func() error { - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - work.Spec.Workload.Manifests[0] = util.ToManifest(object) - _, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - gomega.Eventually(func() error { - deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) - if err != nil { - return err - } - - if *deploy.Spec.Replicas != 3 { - return fmt.Errorf("replicas should be updated to 3 but got %d", *deploy.Spec.Replicas) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - }) - - ginkgo.It("should get conflict if a field is taken by another manager", func() { - work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ - { - ResourceIdentifier: workapiv1.ResourceIdentifier{ - Group: "apps", - Resource: "deployments", - Namespace: clusterName, - Name: "deploy1", - }, - UpdateStrategy: &workapiv1.UpdateStrategy{ - Type: workapiv1.UpdateStrategyTypeServerSideApply, - }, - }, 
- } - - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - // update deployment with another field manager - err = unstructured.SetNestedField(object.Object, int64(2), "spec", "replicas") - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - patch, err := object.MarshalJSON() - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - _, err = spokeKubeClient.AppsV1().Deployments(clusterName).Patch( - context.Background(), "deploy1", types.ApplyPatchType, patch, metav1.PatchOptions{Force: ptr.To[bool](true), FieldManager: "test-integration"}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - // Update deployment by work - err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas") - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Eventually(func() error { - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - work.Spec.Workload.Manifests[0] = util.ToManifest(object) - _, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Failed to apply due to conflict - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse, - []metav1.ConditionStatus{metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval) - - // remove the replica field and apply should work - unstructured.RemoveNestedField(object.Object, "spec", "replicas") - gomega.Eventually(func() error { - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - work.Spec.Workload.Manifests[0] = util.ToManifest(object) - _, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - }) - - ginkgo.It("two manifest works with different field manager", func() { - work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ - { - ResourceIdentifier: workapiv1.ResourceIdentifier{ - Group: "apps", - Resource: "deployments", - Namespace: clusterName, - Name: "deploy1", - }, - UpdateStrategy: &workapiv1.UpdateStrategy{ - Type: workapiv1.UpdateStrategyTypeServerSideApply, - }, - }, - } - - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - // Create another work with different fieldmanager - objCopy := object.DeepCopy() - // work1 does not want to own 
replica field - unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas") - work1 := util.NewManifestWork(clusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)}) - work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ - { - ResourceIdentifier: workapiv1.ResourceIdentifier{ - Group: "apps", - Resource: "deployments", - Namespace: clusterName, - Name: "deploy1", - }, - UpdateStrategy: &workapiv1.UpdateStrategy{ - Type: workapiv1.UpdateStrategyTypeServerSideApply, - ServerSideApply: &workapiv1.ServerSideApplyConfig{ - Force: true, - FieldManager: "work-agent-another", - }, - }, - }, - } - - _, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work1, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertWorkCondition(work1.Namespace, work1.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - // Update deployment replica by work should work since this work still owns the replicas field - err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas") - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Eventually(func() error { - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - work.Spec.Workload.Manifests[0] = util.ToManifest(object) - _, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // This should work since this work still own replicas - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - gomega.Eventually(func() error { - deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) - if err != nil { - return err - } - - if *deploy.Spec.Replicas != 3 { - return fmt.Errorf("expected replica is not correct, got %d", *deploy.Spec.Replicas) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Update sa field will not work - err = unstructured.SetNestedField(object.Object, "another-sa", "spec", "template", "spec", "serviceAccountName") - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Eventually(func() error { - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - work.Spec.Workload.Manifests[0] = util.ToManifest(object) - _, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // This should work since this work still own replicas - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse, - []metav1.ConditionStatus{metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval) - }) - - ginkgo.It("with delete options", func() { - work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ - { - ResourceIdentifier: workapiv1.ResourceIdentifier{ - Group: "apps", - 
Resource: "deployments", - Namespace: clusterName, - Name: "deploy1", - }, - UpdateStrategy: &workapiv1.UpdateStrategy{ - Type: workapiv1.UpdateStrategyTypeServerSideApply, - }, - }, - } - - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - // Create another work with different fieldmanager - objCopy := object.DeepCopy() - // work1 does not want to own replica field - unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas") - work1 := util.NewManifestWork(clusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)}) - work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ - { - ResourceIdentifier: workapiv1.ResourceIdentifier{ - Group: "apps", - Resource: "deployments", - Namespace: clusterName, - Name: "deploy1", - }, - UpdateStrategy: &workapiv1.UpdateStrategy{ - Type: workapiv1.UpdateStrategyTypeServerSideApply, - ServerSideApply: &workapiv1.ServerSideApplyConfig{ - Force: true, - FieldManager: "work-agent-another", - }, - }, - }, - } - - _, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work1, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertWorkCondition(work1.Namespace, work1.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - gomega.Eventually(func() error { - deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) - if err != nil { - return err - } - - if len(deploy.OwnerReferences) != 2 { - return fmt.Errorf("expected ownerrefs is not correct, got %v", deploy.OwnerReferences) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // update deleteOption of the first work - gomega.Eventually(func() error { - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - work.Spec.DeleteOption = &workapiv1.DeleteOption{PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan} - _, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - gomega.Eventually(func() error { - deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) - if err != nil { - return err - } - - if len(deploy.OwnerReferences) != 1 { - return fmt.Errorf("expected ownerrefs is not correct, got %v", deploy.OwnerReferences) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - }) - }) -}) diff --git a/test/integration/cloudevents/work_test.go b/test/integration/cloudevents/work_test.go deleted file mode 100644 index 111d12f4b..000000000 --- a/test/integration/cloudevents/work_test.go +++ /dev/null @@ -1,241 +0,0 @@ -package cloudevents - -import ( - "context" - "fmt" - "time" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - 
"github.com/openshift/library-go/pkg/controller/controllercmd" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilrand "k8s.io/apimachinery/pkg/util/rand" - - workapiv1 "open-cluster-management.io/api/work/v1" - - commonoptions "open-cluster-management.io/ocm/pkg/common/options" - "open-cluster-management.io/ocm/pkg/work/spoke" - "open-cluster-management.io/ocm/test/integration/util" -) - -func runWorkAgent(ctx context.Context, o *spoke.WorkloadAgentOptions, commOption *commonoptions.AgentOptions) { - agentConfig := spoke.NewWorkAgentConfig(commOption, o) - err := agentConfig.RunWorkloadAgent(ctx, &controllercmd.ControllerContext{ - KubeConfig: spokeRestConfig, - EventRecorder: util.NewIntegrationTestEventRecorder("integration"), - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) -} - -var _ = ginkgo.Describe("ManifestWork", func() { - var err error - var cancel context.CancelFunc - - var clusterName string - - var work *workapiv1.ManifestWork - var manifests []workapiv1.Manifest - var appliedManifestWorkName string - - ginkgo.BeforeEach(func() { - clusterName = utilrand.String(5) - - ns := &corev1.Namespace{} - ns.Name = clusterName - _, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - var ctx context.Context - ctx, cancel = context.WithCancel(context.Background()) - - o := spoke.NewWorkloadAgentOptions() - o.StatusSyncInterval = 3 * time.Second - o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second - o.WorkloadSourceDriver = workSourceDriver - o.WorkloadSourceConfig = workSourceConfigFileName - o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName) - o.CloudEventsClientCodecs = []string{"manifest", "manifestbundle"} - - commOptions := commonoptions.NewAgentOptions() - commOptions.SpokeClusterName = clusterName - - go runWorkAgent(ctx, o, commOptions) - - // reset manifests - manifests = nil - }) - - ginkgo.JustBeforeEach(func() { - work = util.NewManifestWork(clusterName, "", manifests) - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - // if the source is not kube, the uid will be used as the manifestwork name - appliedManifestWorkName = fmt.Sprintf("%s-%s", workSourceHash, work.UID) - }) - - ginkgo.AfterEach(func() { - err := workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) - if !errors.IsNotFound(err) { - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - } - - gomega.Eventually(func() error { - _, err := workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return nil - } - if err != nil { - return err - } - return fmt.Errorf("work %s in namespace %s still exists", work.Name, clusterName) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) - - err = spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - if cancel != nil { - cancel() - } - }) - - ginkgo.Context("With a single manifest", func() { - ginkgo.BeforeEach(func() { - manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, nil)), - } - }) - - ginkgo.It("should 
create work and then apply it successfully", func() { - util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - }) - - ginkgo.It("should update work and then apply it successfully", func() { - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - newManifests := []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"x": "y"}, nil)), - } - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - work.Spec.Workload.Manifests = newManifests - - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - - // check if resource created by stale manifest is deleted once it is removed from applied resource list - gomega.Eventually(func() error { - appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return err - } - - for _, appliedResource := range appliedManifestWork.Status.AppliedResources { - if appliedResource.Name == cm1 { - return fmt.Errorf("found applied resource cm1") - } - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - _, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{}) - gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) - }) - - ginkgo.It("should delete work successfully", func() { - util.AssertFinalizerAdded(work.Namespace, work.Name, workSourceWorkClient, eventuallyTimeout, eventuallyInterval) - - err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertWorkDeleted(work.Namespace, work.Name, fmt.Sprintf("%s-%s", workSourceHash, work.UID), manifests, - workSourceWorkClient, spokeWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - }) - }) - - ginkgo.Context("With multiple manifests", func() { - ginkgo.BeforeEach(func() { - manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap("non-existent-namespace", cm1, map[string]string{"a": "b"}, nil)), - util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, nil)), - util.ToManifest(util.NewConfigmap(clusterName, "cm3", map[string]string{"e": "f"}, nil)), - } - }) - - ginkgo.It("should create work and then apply it 
successfully", func() { - util.AssertExistenceOfConfigMaps(manifests[1:], spokeKubeClient, eventuallyTimeout, eventuallyInterval) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse, - []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionFalse, - []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - }) - - ginkgo.It("should update work and then apply it successfully", func() { - util.AssertExistenceOfConfigMaps(manifests[1:], spokeKubeClient, eventuallyTimeout, eventuallyInterval) - - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse, - []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionFalse, - []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - newManifests := []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, nil)), - util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"x": "y"}, nil)), - util.ToManifest(util.NewConfigmap(clusterName, "cm4", map[string]string{"e": "f"}, nil)), - } - - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - work.Spec.Workload.Manifests = newManifests - work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - - // check if Available status is updated or not - util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - // check if resource created by stale manifest is deleted once it is removed from applied resource list - gomega.Eventually(func() error { - appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return err - } - - for _, appliedResource := range appliedManifestWork.Status.AppliedResources { - if appliedResource.Name == "cm3" { - return fmt.Errorf("found appled resource cm3") - } - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - _, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), "cm3", metav1.GetOptions{}) - gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) - }) - - ginkgo.It("should delete work successfully", func() { - util.AssertFinalizerAdded(work.Namespace, work.Name, workSourceWorkClient, eventuallyTimeout, eventuallyInterval) - - err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, 
metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - util.AssertWorkDeleted(work.Namespace, work.Name, fmt.Sprintf("%s-%s", workSourceHash, work.Name), manifests, - workSourceWorkClient, spokeWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - }) - }) -}) diff --git a/test/integration/util/assertion.go b/test/integration/util/assertion.go index 75d770225..98a826be3 100644 --- a/test/integration/util/assertion.go +++ b/test/integration/util/assertion.go @@ -143,7 +143,7 @@ func AssertAppliedManifestWorkDeleted(name string, workClient workclientset.Inte } // AssertFinalizerAdded check if finalizer is added -func AssertFinalizerAdded(namespace, name string, workClient workclientset.Interface, eventuallyTimeout, eventuallyInterval int) { +func AssertFinalizerAdded(namespace, name, expectedFinalizer string, workClient workclientset.Interface, eventuallyTimeout, eventuallyInterval int) { gomega.Eventually(func() error { work, err := workClient.WorkV1().ManifestWorks(namespace).Get(context.Background(), name, metav1.GetOptions{}) if err != nil { @@ -151,7 +151,7 @@ func AssertFinalizerAdded(namespace, name string, workClient workclientset.Inter } for _, finalizer := range work.Finalizers { - if finalizer == workapiv1.ManifestWorkFinalizer { + if finalizer == expectedFinalizer { return nil } } @@ -230,7 +230,7 @@ func AssertNonexistenceOfResources(gvrs []schema.GroupVersionResource, namespace } // AssertAppliedResources check if applied resources in work status are updated correctly -func AssertAppliedResources(hubHash, workName string, gvrs []schema.GroupVersionResource, namespaces, names []string, +func AssertAppliedResources(appliedManifestWorkName string, gvrs []schema.GroupVersionResource, namespaces, names []string, workClient workclientset.Interface, eventuallyTimeout, eventuallyInterval int) { gomega.Expect(gvrs).To(gomega.HaveLen(len(namespaces))) gomega.Expect(gvrs).To(gomega.HaveLen(len(names))) @@ -264,7 +264,6 @@ func AssertAppliedResources(hubHash, workName string, gvrs []schema.GroupVersion }) gomega.Eventually(func() error { - appliedManifestWorkName := fmt.Sprintf("%s-%s", hubHash, workName) appliedManifestWork, err := workClient.WorkV1().AppliedManifestWorks().Get( context.Background(), appliedManifestWorkName, metav1.GetOptions{}) if err != nil { diff --git a/test/integration/util/authentication.go b/test/integration/util/authentication.go index d83906e8e..2893679f2 100644 --- a/test/integration/util/authentication.go +++ b/test/integration/util/authentication.go @@ -19,7 +19,6 @@ import ( certificates "k8s.io/api/certificates/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -322,7 +321,7 @@ func SyncBootstrapKubeConfigFilesToSecret( } secret, err := kubeClient.CoreV1().Secrets(secretNS).Get(context.Background(), secretName, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { + if errors.IsNotFound(err) { secret = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, diff --git a/test/integration/util/mqtt.go b/test/integration/util/mqtt.go new file mode 100644 index 000000000..dea557512 --- /dev/null +++ b/test/integration/util/mqtt.go @@ -0,0 +1,91 @@ +package util + +import ( + "fmt" + "log" + "os" + "time" + + mochimqtt "github.com/mochi-mqtt/server/v2" + "github.com/mochi-mqtt/server/v2/hooks/auth" + "github.com/mochi-mqtt/server/v2/listeners" + 
"gopkg.in/yaml.v2" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +const MQTTBrokerHost = "127.0.0.1:1883" + +var mqttBroker *mochimqtt.Server + +func RunMQTTBroker() error { + // start a MQTT broker + mqttBroker = mochimqtt.New(nil) + + // allow all connections + if err := mqttBroker.AddHook(new(auth.AllowHook), nil); err != nil { + return err + } + + if err := mqttBroker.AddListener(listeners.NewTCP( + listeners.Config{ + ID: "mqtt-test-broker", + Address: MQTTBrokerHost, + })); err != nil { + return err + } + + go func() { + if err := mqttBroker.Serve(); err != nil { + log.Fatal(err) + } + }() + + return nil +} + +func StopMQTTBroker() error { + if mqttBroker != nil { + return mqttBroker.Close() + } + + return nil +} + +func CreateMQTTConfigFile(configFileName, sourceID string) error { + config := mqtt.MQTTConfig{ + BrokerHost: MQTTBrokerHost, + Topics: &types.Topics{ + SourceEvents: fmt.Sprintf("sources/%s/clusters/+/sourceevents", sourceID), + AgentEvents: fmt.Sprintf("sources/%s/clusters/+/agentevents", sourceID), + }, + } + + configData, err := yaml.Marshal(config) + if err != nil { + return err + } + + if err := os.WriteFile(configFileName, configData, 0600); err != nil { + return err + } + + return nil +} + +func NewMQTTSourceOptions(sourceID string) *mqtt.MQTTOptions { + return &mqtt.MQTTOptions{ + KeepAlive: 60, + PubQoS: 1, + SubQoS: 1, + Topics: types.Topics{ + SourceEvents: fmt.Sprintf("sources/%s/clusters/+/sourceevents", sourceID), + AgentEvents: fmt.Sprintf("sources/%s/clusters/+/agentevents", sourceID), + SourceBroadcast: "sources/+/sourcebroadcast", + }, + Dialer: &mqtt.MQTTDialer{ + BrokerHost: MQTTBrokerHost, + Timeout: 5 * time.Second, + }, + } +} diff --git a/test/integration/util/work.go b/test/integration/util/work.go new file mode 100644 index 000000000..78083ab10 --- /dev/null +++ b/test/integration/util/work.go @@ -0,0 +1,42 @@ +package util + +import ( + "encoding/json" + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + workapiv1 "open-cluster-management.io/api/work/v1" +) + +const ( + KubeDriver = "kube" + MQTTDriver = "mqtt" +) + +func NewWorkPatch(old, new *workapiv1.ManifestWork) ([]byte, error) { + oldData, err := json.Marshal(old) + if err != nil { + return nil, err + } + + newData, err := json.Marshal(new) + if err != nil { + return nil, err + } + + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + if err != nil { + return nil, err + } + + return patchBytes, nil +} + +func AppliedManifestWorkName(sourceDriver, hubHash string, work *workapiv1.ManifestWork) string { + if sourceDriver != KubeDriver { + // if the source is not kube, the uid will be used as the manifestwork name on the agent side + return fmt.Sprintf("%s-%s", hubHash, work.UID) + } + + return fmt.Sprintf("%s-%s", hubHash, work.Name) +} diff --git a/test/integration/work/deleteoption_test.go b/test/integration/work/deleteoption_test.go index 0dfd780e7..48c5c3978 100644 --- a/test/integration/work/deleteoption_test.go +++ b/test/integration/work/deleteoption_test.go @@ -10,7 +10,8 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" workapiv1 "open-cluster-management.io/api/work/v1" @@ -24,6 +25,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() { var commOptions 
*commonoptions.AgentOptions var cancel context.CancelFunc + var workName string var work *workapiv1.ManifestWork var appliedManifestWorkName string var manifests []workapiv1.Manifest @@ -31,13 +33,20 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() { var err error ginkgo.BeforeEach(func() { + clusterName := rand.String(5) + workName = fmt.Sprintf("work-delete-option-%s", rand.String(5)) + o = spoke.NewWorkloadAgentOptions() o.StatusSyncInterval = 3 * time.Second o.WorkloadSourceDriver = sourceDriver o.WorkloadSourceConfig = sourceConfigFileName + if sourceDriver != util.KubeDriver { + o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName) + o.CloudEventsClientCodecs = []string{"manifestbundle"} + } commOptions = commonoptions.NewAgentOptions() - commOptions.SpokeClusterName = utilrand.String(5) + commOptions.SpokeClusterName = clusterName ns := &corev1.Namespace{} ns.Name = commOptions.SpokeClusterName @@ -53,7 +62,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() { }) ginkgo.JustBeforeEach(func() { - work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests) + work = util.NewManifestWork(commOptions.SpokeClusterName, workName, manifests) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) @@ -81,7 +90,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() { work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name) + appliedManifestWorkName = util.AppliedManifestWorkName(sourceDriver, hubHash, work) util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) @@ -95,7 +104,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() { util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - anotherAppliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, anotherWork.Name) + anotherAppliedManifestWorkName = util.AppliedManifestWorkName(sourceDriver, hubHash, anotherWork) }) ginkgo.It("shared resource between the manifestwork should be kept when one manifestwork is deleted", func() { @@ -224,10 +233,17 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) // Update one manifestwork to remove the shared resource - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - work.Spec.Workload.Manifests = []workapiv1.Manifest{manifests[1]} - _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + + newWork := updatedWork.DeepCopy() + newWork.Spec.Workload.Manifests = []workapiv1.Manifest{manifests[1]} + + pathBytes, err := util.NewWorkPatch(updatedWork, newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = 
hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch( + context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // Ensure the resource is not tracked by the appliedmanifestwork. @@ -293,7 +309,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() { PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan, } - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, @@ -446,15 +462,23 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() { // Remove the resource from the manifests gomega.Eventually(func() error { - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) if err != nil { return err } - work.Spec.Workload.Manifests = []workapiv1.Manifest{ + newWork := updatedWork.DeepCopy() + newWork.Spec.Workload.Manifests = []workapiv1.Manifest{ util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"c": "d"}, []string{})), } - _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + + pathBytes, err := util.NewWorkPatch(updatedWork, newWork) + if err != nil { + return err + } + + _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch( + context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) @@ -511,13 +535,21 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() { // Remove the delete option gomega.Eventually(func() error { - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + newWork := updatedWork.DeepCopy() + newWork.Spec.DeleteOption = nil + + pathBytes, err := util.NewWorkPatch(updatedWork, newWork) if err != nil { return err } - work.Spec.DeleteOption = nil - _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch( + context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) diff --git a/test/integration/work/manifestworkreplicaset_test.go b/test/integration/work/manifestworkreplicaset_test.go index 426ab36f3..d4e23f4bb 100644 --- a/test/integration/work/manifestworkreplicaset_test.go +++ b/test/integration/work/manifestworkreplicaset_test.go @@ -278,7 +278,7 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() { ginkgo.By("rollout 
stop since max failure exceeds") gomega.Eventually( - asserCondition( + assertCondition( workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut, metav1.ConditionFalse, manifestWorkReplicaSet), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) gomega.Eventually( @@ -409,7 +409,7 @@ func assertSummary(summary workapiv1alpha1.ManifestWorkReplicaSetSummary, mwrs * } } -func asserCondition(condType string, status metav1.ConditionStatus, mwrs *workapiv1alpha1.ManifestWorkReplicaSet) func() error { +func assertCondition(condType string, status metav1.ConditionStatus, mwrs *workapiv1alpha1.ManifestWorkReplicaSet) func() error { return func() error { rs, err := hubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(mwrs.Namespace).Get(context.TODO(), mwrs.Name, metav1.GetOptions{}) diff --git a/test/integration/work/statusfeedback_test.go b/test/integration/work/statusfeedback_test.go index 6a08b1cf2..d57e91223 100644 --- a/test/integration/work/statusfeedback_test.go +++ b/test/integration/work/statusfeedback_test.go @@ -11,7 +11,7 @@ import ( corev1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/rand" "k8s.io/utils/ptr" ocmfeature "open-cluster-management.io/api/feature" @@ -28,19 +28,27 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { var commOptions *commonoptions.AgentOptions var cancel context.CancelFunc + var workName string var work *workapiv1.ManifestWork var manifests []workapiv1.Manifest var err error ginkgo.BeforeEach(func() { + workName = fmt.Sprintf("status-feedback-work-%s", rand.String(5)) + clusterName := rand.String(5) + o = spoke.NewWorkloadAgentOptions() o.StatusSyncInterval = 3 * time.Second o.WorkloadSourceDriver = sourceDriver o.WorkloadSourceConfig = sourceConfigFileName + if sourceDriver != util.KubeDriver { + o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName) + o.CloudEventsClientCodecs = []string{"manifestbundle"} + } commOptions = commonoptions.NewAgentOptions() - commOptions.SpokeClusterName = utilrand.String(5) + commOptions.SpokeClusterName = clusterName ns := &corev1.Namespace{} ns.Name = commOptions.SpokeClusterName @@ -52,7 +60,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }) ginkgo.JustBeforeEach(func() { - work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests) + work = util.NewManifestWork(commOptions.SpokeClusterName, workName, manifests) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) diff --git a/test/integration/work/suite_test.go b/test/integration/work/suite_test.go index 272192c4b..e7a2e8987 100644 --- a/test/integration/work/suite_test.go +++ b/test/integration/work/suite_test.go @@ -2,6 +2,8 @@ package work import ( "context" + "flag" + "fmt" "os" "path" "testing" @@ -9,9 +11,12 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/openshift/library-go/pkg/controller/controllercmd" + + "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -21,6 +26,10 @@ import ( ocmfeature "open-cluster-management.io/api/feature" workapiv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work" + sourcecodec 
"open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec" + workstore "open-cluster-management.io/sdk-go/pkg/cloudevents/work/store" + "open-cluster-management.io/ocm/pkg/features" "open-cluster-management.io/ocm/pkg/work/helper" "open-cluster-management.io/ocm/pkg/work/hub" @@ -33,8 +42,7 @@ const ( cm1, cm2 = "cm1", "cm2" ) -// focus on hub is a kube cluster -const sourceDriver = "kube" +var sourceDriver = util.KubeDriver var tempDir string @@ -62,6 +70,13 @@ var CRDPaths = []string{ "./vendor/open-cluster-management.io/api/work/v1/0000_01_work.open-cluster-management.io_appliedmanifestworks.crd.yaml", } +func init() { + klog.InitFlags(nil) + klog.SetOutput(ginkgo.GinkgoWriter) + + flag.StringVar(&sourceDriver, "test.driver", util.KubeDriver, "Driver of test, default is kube") +} + func TestIntegration(t *testing.T) { gomega.RegisterFailHandler(ginkgo.Fail) ginkgo.RunSpecs(t, "Integration Suite") @@ -86,41 +101,73 @@ var _ = ginkgo.BeforeSuite(func() { gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Expect(tempDir).ToNot(gomega.BeEmpty()) - sourceConfigFileName = path.Join(tempDir, "kubeconfig") - err = util.CreateKubeconfigFile(cfg, sourceConfigFileName) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - err = workapiv1.Install(scheme.Scheme) gomega.Expect(err).NotTo(gomega.HaveOccurred()) features.SpokeMutableFeatureGate.Add(ocmfeature.DefaultSpokeWorkFeatureGates) + switch sourceDriver { + case util.KubeDriver: + sourceConfigFileName = path.Join(tempDir, "kubeconfig") + err = util.CreateKubeconfigFile(cfg, sourceConfigFileName) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + hubHash = helper.HubHash(cfg.Host) + + hubWorkClient, err = workclientset.NewForConfig(cfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // start hub controller + go func() { + opts := hub.NewWorkHubManagerOptions() + opts.WorkDriver = "kube" + opts.WorkDriverConfig = sourceConfigFileName + hubConfig := hub.NewWorkHubManagerConfig(opts) + err := hubConfig.RunWorkHubManager(envCtx, &controllercmd.ControllerContext{ + KubeConfig: cfg, + EventRecorder: util.NewIntegrationTestEventRecorder("hub"), + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + case util.MQTTDriver: + sourceID := "work-test-mqtt" + err = util.RunMQTTBroker() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + sourceConfigFileName = path.Join(tempDir, "mqttconfig") + err = util.CreateMQTTConfigFile(sourceConfigFileName, sourceID) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + hubHash = helper.HubHash(util.MQTTBrokerHost) + + watcherStore, err := workstore.NewSourceLocalWatcherStore(envCtx, func(ctx context.Context) ([]*workapiv1.ManifestWork, error) { + return []*workapiv1.ManifestWork{}, nil + }) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + sourceClient, err := work.NewClientHolderBuilder(util.NewMQTTSourceOptions(sourceID)). + WithClientID(fmt.Sprintf("%s-%s", sourceID, rand.String(5))). + WithSourceID(sourceID). + WithCodecs(sourcecodec.NewManifestBundleCodec()). + WithWorkClientWatcherStore(watcherStore). 
+ NewSourceClientHolder(envCtx) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + hubWorkClient = sourceClient.WorkInterface() + default: + ginkgo.Fail(fmt.Sprintf("unsupported test driver %s", sourceDriver)) + } + spokeRestConfig = cfg - hubHash = helper.HubHash(spokeRestConfig.Host) + spokeKubeClient, err = kubernetes.NewForConfig(cfg) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - hubWorkClient, err = workclientset.NewForConfig(cfg) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) spokeWorkClient, err = workclientset.NewForConfig(cfg) gomega.Expect(err).NotTo(gomega.HaveOccurred()) hubClusterClient, err = clusterclientset.NewForConfig(cfg) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - opts := hub.NewWorkHubManagerOptions() - opts.WorkDriver = "kube" - opts.WorkDriverConfig = sourceConfigFileName - hubConfig := hub.NewWorkHubManagerConfig(opts) - - // start hub controller - go func() { - err := hubConfig.RunWorkHubManager(envCtx, &controllercmd.ControllerContext{ - KubeConfig: cfg, - EventRecorder: util.NewIntegrationTestEventRecorder("hub"), - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() }) var _ = ginkgo.AfterSuite(func() { @@ -131,6 +178,9 @@ var _ = ginkgo.AfterSuite(func() { err := testEnv.Stop() gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = util.StopMQTTBroker() + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + if tempDir != "" { os.RemoveAll(tempDir) } diff --git a/test/integration/work/updatestrategy_test.go b/test/integration/work/updatestrategy_test.go index fc0920dc4..4cc44785e 100644 --- a/test/integration/work/updatestrategy_test.go +++ b/test/integration/work/updatestrategy_test.go @@ -12,7 +12,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" - utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/rand" "k8s.io/utils/ptr" workapiv1 "open-cluster-management.io/api/work/v1" @@ -27,19 +27,27 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { var commOptions *commonoptions.AgentOptions var cancel context.CancelFunc + var workName string var work *workapiv1.ManifestWork var manifests []workapiv1.Manifest var err error ginkgo.BeforeEach(func() { + clusterName := rand.String(5) + workName = fmt.Sprintf("update-strategy-work-%s", rand.String(5)) + o = spoke.NewWorkloadAgentOptions() o.StatusSyncInterval = 3 * time.Second o.WorkloadSourceDriver = sourceDriver o.WorkloadSourceConfig = sourceConfigFileName + if sourceDriver != util.KubeDriver { + o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName) + o.CloudEventsClientCodecs = []string{"manifestbundle"} + } commOptions = commonoptions.NewAgentOptions() - commOptions.SpokeClusterName = utilrand.String(5) + commOptions.SpokeClusterName = clusterName ns := &corev1.Namespace{} ns.Name = commOptions.SpokeClusterName @@ -55,7 +63,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { }) ginkgo.JustBeforeEach(func() { - work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests) + work = util.NewManifestWork(commOptions.SpokeClusterName, workName, manifests) }) ginkgo.AfterEach(func() { @@ -100,13 +108,21 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas") gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - work, err = 
hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) if err != nil { return err } - work.Spec.Workload.Manifests[0] = util.ToManifest(object) - _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + newWork := updatedWork.DeepCopy() + newWork.Spec.Workload.Manifests[0] = util.ToManifest(object) + + pathBytes, err := util.NewWorkPatch(updatedWork, newWork) + if err != nil { + return err + } + + _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch( + context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) @@ -279,13 +295,21 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { // update work err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas") gomega.Eventually(func() error { - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + newWork := updatedWork.DeepCopy() + newWork.Spec.Workload.Manifests[0] = util.ToManifest(object) + + pathBytes, err := util.NewWorkPatch(updatedWork, newWork) if err != nil { return err } - work.Spec.Workload.Manifests[0] = util.ToManifest(object) - _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch( + context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) @@ -337,13 +361,21 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas") gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) if err != nil { return err } - work.Spec.Workload.Manifests[0] = util.ToManifest(object) - _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + newWork := updatedWork.DeepCopy() + newWork.Spec.Workload.Manifests[0] = util.ToManifest(object) + + pathBytes, err := util.NewWorkPatch(updatedWork, newWork) + if err != nil { + return err + } + + _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch( + context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) @@ -354,13 +386,21 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { // remove the replica field and apply should work 
unstructured.RemoveNestedField(object.Object, "spec", "replicas") gomega.Eventually(func() error { - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + newWork := updatedWork.DeepCopy() + newWork.Spec.Workload.Manifests[0] = util.ToManifest(object) + + pathBytes, err := util.NewWorkPatch(updatedWork, newWork) if err != nil { return err } - work.Spec.Workload.Manifests[0] = util.ToManifest(object) - _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch( + context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) @@ -422,13 +462,21 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas") gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) if err != nil { return err } - work.Spec.Workload.Manifests[0] = util.ToManifest(object) - _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + newWork := updatedWork.DeepCopy() + newWork.Spec.Workload.Manifests[0] = util.ToManifest(object) + + pathBytes, err := util.NewWorkPatch(updatedWork, newWork) + if err != nil { + return err + } + + _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch( + context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) @@ -453,13 +501,21 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { err = unstructured.SetNestedField(object.Object, "another-sa", "spec", "template", "spec", "serviceAccountName") gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) if err != nil { return err } - work.Spec.Workload.Manifests[0] = util.ToManifest(object) - _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + newWork := updatedWork.DeepCopy() + newWork.Spec.Workload.Manifests[0] = util.ToManifest(object) + + pathBytes, err := util.NewWorkPatch(updatedWork, newWork) + if err != nil { + return err + } + + _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch( + context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{}) return err }, eventuallyTimeout, 
eventuallyInterval).ShouldNot(gomega.HaveOccurred()) @@ -533,13 +589,21 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { // update deleteOption of the first work gomega.Eventually(func() error { - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + newWork := updatedWork.DeepCopy() + newWork.Spec.DeleteOption = &workapiv1.DeleteOption{PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan} + + pathBytes, err := util.NewWorkPatch(updatedWork, newWork) if err != nil { return err } - work.Spec.DeleteOption = &workapiv1.DeleteOption{PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan} - _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch( + context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) @@ -560,7 +624,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { ginkgo.It("should not increase the workload generation when nothing changes", func() { nestedWorkNamespace := "default" - nestedWorkName := fmt.Sprintf("nested-work-%s", utilrand.String(5)) + nestedWorkName := fmt.Sprintf("nested-work-%s", rand.String(5)) cm := util.NewConfigmap(nestedWorkNamespace, "cm-test", map[string]string{"a": "b"}, []string{}) nestedWork := util.NewManifestWork(nestedWorkNamespace, nestedWorkName, []workapiv1.Manifest{util.ToManifest(cm)}) @@ -569,7 +633,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { Kind: "ManifestWork", } - work := util.NewManifestWork(commOptions.SpokeClusterName, "", []workapiv1.Manifest{util.ToManifest(nestedWork)}) + work := util.NewManifestWork(commOptions.SpokeClusterName, workName, []workapiv1.Manifest{util.ToManifest(nestedWork)}) work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ { ResourceIdentifier: workapiv1.ResourceIdentifier{ diff --git a/test/integration/work/work_test.go b/test/integration/work/work_test.go index e7976d928..8b028369d 100644 --- a/test/integration/work/work_test.go +++ b/test/integration/work/work_test.go @@ -13,7 +13,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/dynamic" "k8s.io/client-go/util/retry" @@ -22,6 +23,7 @@ import ( commonoptions "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/pkg/work/spoke" "open-cluster-management.io/ocm/test/integration/util" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/store" ) func startWorkAgent(ctx context.Context, o *spoke.WorkloadAgentOptions, commOption *commonoptions.AgentOptions) { @@ -38,21 +40,32 @@ var _ = ginkgo.Describe("ManifestWork", func() { var commOptions *commonoptions.AgentOptions var cancel context.CancelFunc + var workName string var work *workapiv1.ManifestWork + var expectedFinalizer string var manifests []workapiv1.Manifest var appliedManifestWorkName string var err error 
ginkgo.BeforeEach(func() { + expectedFinalizer = workapiv1.ManifestWorkFinalizer + workName = fmt.Sprintf("work-%s", rand.String(5)) + clusterName := rand.String(5) + o = spoke.NewWorkloadAgentOptions() o.StatusSyncInterval = 3 * time.Second o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second o.WorkloadSourceDriver = sourceDriver o.WorkloadSourceConfig = sourceConfigFileName + if sourceDriver != util.KubeDriver { + expectedFinalizer = store.ManifestWorkFinalizer + o.CloudEventsClientID = fmt.Sprintf("%s-work-agent", clusterName) + o.CloudEventsClientCodecs = []string{"manifestbundle"} + } commOptions = commonoptions.NewAgentOptions() - commOptions.SpokeClusterName = utilrand.String(5) + commOptions.SpokeClusterName = clusterName ns := &corev1.Namespace{} ns.Name = commOptions.SpokeClusterName @@ -68,9 +81,9 @@ var _ = ginkgo.Describe("ManifestWork", func() { }) ginkgo.JustBeforeEach(func() { - work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests) + work = util.NewManifestWork(commOptions.SpokeClusterName, workName, manifests) work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) - appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name) + appliedManifestWorkName = util.AppliedManifestWorkName(sourceDriver, hubHash, work) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) @@ -131,18 +144,24 @@ var _ = ginkgo.Describe("ManifestWork", func() { newManifests := []workapiv1.Manifest{ util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"x": "y"}, nil)), } - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - work.Spec.Workload.Manifests = newManifests - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + newWork := updatedWork.DeepCopy() + newWork.Spec.Workload.Manifests = newManifests + + pathBytes, err := util.NewWorkPatch(updatedWork, newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch( + context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) // check if resource created by stale manifest is deleted once it is removed from applied resource list gomega.Eventually(func() error { - appliedManifestWork, err := hubWorkClient.WorkV1().AppliedManifestWorks().Get( + appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get( context.Background(), appliedManifestWorkName, metav1.GetOptions{}) if err != nil { return err @@ -162,7 +181,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { }) ginkgo.It("should delete work successfully", func() { - util.AssertFinalizerAdded(work.Namespace, work.Name, hubWorkClient, eventuallyTimeout, eventuallyInterval) + util.AssertFinalizerAdded(work.Namespace, work.Name, expectedFinalizer, hubWorkClient, eventuallyTimeout, eventuallyInterval) err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, 
metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -204,10 +223,17 @@ var _ = ginkgo.Describe("ManifestWork", func() { util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, "cm4", map[string]string{"e": "f"}, nil)), } - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + newWork := updatedWork.DeepCopy() + newWork.Spec.Workload.Manifests = newManifests + + pathBytes, err := util.NewWorkPatch(updatedWork, newWork) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - work.Spec.Workload.Manifests = newManifests - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + + _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch( + context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) @@ -225,7 +251,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { for _, appliedResource := range appliedManifestWork.Status.AppliedResources { if appliedResource.Name == "cm3" { - return fmt.Errorf("found appled resource cm3") + return fmt.Errorf("found applied resource cm3") } } @@ -237,7 +263,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { }) ginkgo.It("should delete work successfully", func() { - util.AssertFinalizerAdded(work.Namespace, work.Name, hubWorkClient, eventuallyTimeout, eventuallyInterval) + util.AssertFinalizerAdded(work.Namespace, work.Name, expectedFinalizer, hubWorkClient, eventuallyTimeout, eventuallyInterval) err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -287,7 +313,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { } util.AssertExistenceOfResources(gvrs, namespaces, names, spokeDynamicClient, eventuallyTimeout, eventuallyInterval) - util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval) + util.AssertAppliedResources(appliedManifestWorkName, gvrs, namespaces, names, spokeWorkClient, eventuallyTimeout, eventuallyInterval) }) ginkgo.It("should merge annotation of existing CR", func() { @@ -303,7 +329,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { } util.AssertExistenceOfResources(gvrs, namespaces, names, spokeDynamicClient, eventuallyTimeout, eventuallyInterval) - util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval) + util.AssertAppliedResources(appliedManifestWorkName, gvrs, namespaces, names, spokeWorkClient, eventuallyTimeout, eventuallyInterval) // update object label obj, gvr, err := util.GuestbookCr(commOptions.SpokeClusterName, "guestbook1") @@ -318,10 +344,17 @@ var _ = ginkgo.Describe("ManifestWork", func() { // Update manifestwork obj.SetAnnotations(map[string]string{"foo1": "bar1"}) - updatework, err := hubWorkClient.WorkV1().ManifestWorks(work.Namespace).Get(context.TODO(), work.Name, metav1.GetOptions{}) + work, err := 
hubWorkClient.WorkV1().ManifestWorks(work.Namespace).Get(context.TODO(), work.Name, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - updatework.Spec.Workload.Manifests[1] = util.ToManifest(obj) - _, err = hubWorkClient.WorkV1().ManifestWorks(work.Namespace).Update(context.TODO(), updatework, metav1.UpdateOptions{}) + + newWork := work.DeepCopy() + newWork.Spec.Workload.Manifests[1] = util.ToManifest(obj) + + pathBytes, err := util.NewWorkPatch(work, newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch( + context.Background(), work.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // wait for annotation merge @@ -353,7 +386,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { } util.AssertExistenceOfResources(gvrs, namespaces, names, spokeDynamicClient, eventuallyTimeout, eventuallyInterval) - util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval) + util.AssertAppliedResources(appliedManifestWorkName, gvrs, namespaces, names, spokeWorkClient, eventuallyTimeout, eventuallyInterval) // update object finalizer obj, gvr, err := util.GuestbookCr(commOptions.SpokeClusterName, "guestbook1") @@ -371,10 +404,18 @@ var _ = ginkgo.Describe("ManifestWork", func() { obj.SetFinalizers(nil) // set an annotation to make sure the cr will be updated, so that we can check whether the finalizer changest. obj.SetAnnotations(map[string]string{"foo": "bar"}) - updatework, err := hubWorkClient.WorkV1().ManifestWorks(work.Namespace).Get(context.TODO(), work.Name, metav1.GetOptions{}) + + updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(work.Namespace).Get(context.TODO(), work.Name, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - updatework.Spec.Workload.Manifests[1] = util.ToManifest(obj) - _, err = hubWorkClient.WorkV1().ManifestWorks(work.Namespace).Update(context.TODO(), updatework, metav1.UpdateOptions{}) + + newWork := updatedWork.DeepCopy() + newWork.Spec.Workload.Manifests[1] = util.ToManifest(obj) + + pathBytes, err := util.NewWorkPatch(updatedWork, newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch( + context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // wait for annotation merge @@ -417,7 +458,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { } util.AssertExistenceOfResources(gvrs, namespaces, names, spokeDynamicClient, eventuallyTimeout, eventuallyInterval) - util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval) + util.AssertAppliedResources(appliedManifestWorkName, gvrs, namespaces, names, spokeWorkClient, eventuallyTimeout, eventuallyInterval) // delete manifest work err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) @@ -485,7 +526,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { } util.AssertExistenceOfResources(gvrs, namespaces, names, spokeDynamicClient, eventuallyTimeout, eventuallyInterval) - util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval) + util.AssertAppliedResources(appliedManifestWorkName, gvrs, 
namespaces, names, spokeWorkClient, eventuallyTimeout, eventuallyInterval) }) }) @@ -537,7 +578,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { } util.AssertExistenceOfResources(gvrs, namespaces, names, spokeDynamicClient, eventuallyTimeout, eventuallyInterval) - util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval) + util.AssertAppliedResources(appliedManifestWorkName, gvrs, namespaces, names, spokeWorkClient, eventuallyTimeout, eventuallyInterval) }) ginkgo.It("should update Service Account and Deployment successfully", func() { @@ -561,7 +602,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { util.AssertExistenceOfResources(gvrs, namespaces, names, spokeDynamicClient, eventuallyTimeout, eventuallyInterval) ginkgo.By("check if applied resources in status are updated") - util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval) + util.AssertAppliedResources(appliedManifestWorkName, gvrs, namespaces, names, spokeWorkClient, eventuallyTimeout, eventuallyInterval) // update manifests in work: 1) swap service account and deployment; 2) rename service account; 3) update deployment ginkgo.By("update manifests in work") @@ -583,10 +624,17 @@ var _ = ginkgo.Describe("ManifestWork", func() { updateTime := metav1.Now() time.Sleep(1 * time.Second) - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + updatedWork, err := hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - work.Spec.Workload.Manifests = newManifests - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + + newWork := updatedWork.DeepCopy() + newWork.Spec.Workload.Manifests = newManifests + + pathBytes, err := util.NewWorkPatch(updatedWork, newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch( + context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) ginkgo.By("check existence of all maintained resources") @@ -647,7 +695,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { util.AssertWorkGeneration(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, eventuallyTimeout, eventuallyInterval) ginkgo.By("check if applied resources in status are updated") - util.AssertAppliedResources(hubHash, work.Name, gvrs, namespaces, names, hubWorkClient, eventuallyTimeout, eventuallyInterval) + util.AssertAppliedResources(appliedManifestWorkName, gvrs, namespaces, names, spokeWorkClient, eventuallyTimeout, eventuallyInterval) ginkgo.By("check if resources which are no longer maintained have been deleted") util.AssertNonexistenceOfResources( @@ -679,15 +727,22 @@ var _ = ginkgo.Describe("ManifestWork", func() { util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + updatedWork, err := 
hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + newWork := updatedWork.DeepCopy() + newWork.Spec.Workload.Manifests = manifests[1:] + + pathBytes, err := util.NewWorkPatch(updatedWork, newWork) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - work.Spec.Workload.Manifests = manifests[1:] - work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + + _, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Patch( + context.Background(), updatedWork.Name, types.MergePatchType, pathBytes, metav1.PatchOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) util.AssertExistenceOfConfigMaps(manifests[1:], spokeKubeClient, eventuallyTimeout, eventuallyInterval) - err := hubWorkClient.WorkV1().ManifestWorks(work.Namespace).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + err = hubWorkClient.WorkV1().ManifestWorks(work.Namespace).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // remove finalizer from the applied resources for stale manifest after 2 seconds diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md new file mode 100644 index 000000000..967e06074 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -0,0 +1,26 @@ +# Changes + +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15) + + +### Features + +* **compute/metadata:** Add context aware functions ([#9733](https://github.com/googleapis/google-cloud-go/issues/9733)) ([e4eb5b4](https://github.com/googleapis/google-cloud-go/commit/e4eb5b46ee2aec9d2fc18300bfd66015e25a0510)) + +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) + + +### Bug Fixes + +* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165) + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) + + +### Bug Fixes + +* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) + +## [0.1.0] (2022-10-26) + +Initial release of metadata being it's own module. diff --git a/vendor/cloud.google.com/go/compute/metadata/LICENSE b/vendor/cloud.google.com/go/compute/metadata/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
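
The work_test.go hunks earlier in this patch replace in-place `Update` calls with a Get / DeepCopy / merge-patch flow driven by the new `util.NewWorkPatch` helper. Below is a minimal sketch of that flow under stated assumptions: the helper is modeled here with `github.com/evanphx/json-patch`'s `CreateMergePatch` and the client is assumed to be the generated open-cluster-management work clientset; neither detail is taken verbatim from this patch.

```go
package example

import (
	"context"
	"encoding/json"

	jsonpatch "github.com/evanphx/json-patch"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	workclientset "open-cluster-management.io/api/client/work/clientset/versioned"
	workapiv1 "open-cluster-management.io/api/work/v1"
)

// newWorkPatch builds a JSON merge patch from the old and new ManifestWork
// objects. This is an assumption about what util.NewWorkPatch roughly does,
// not the repository's actual implementation.
func newWorkPatch(oldWork, newWork *workapiv1.ManifestWork) ([]byte, error) {
	oldData, err := json.Marshal(oldWork)
	if err != nil {
		return nil, err
	}
	newData, err := json.Marshal(newWork)
	if err != nil {
		return nil, err
	}
	return jsonpatch.CreateMergePatch(oldData, newData)
}

// updateManifests mirrors the test flow: read the current work, mutate a
// deep copy, diff the two, and send the result as a merge patch.
func updateManifests(ctx context.Context, client workclientset.Interface,
	namespace, name string, manifests []workapiv1.Manifest) error {
	work, err := client.WorkV1().ManifestWorks(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	newWork := work.DeepCopy()
	newWork.Spec.Workload.Manifests = manifests

	patchBytes, err := newWorkPatch(work, newWork)
	if err != nil {
		return err
	}

	// Only the changed fields travel over the wire.
	_, err = client.WorkV1().ManifestWorks(namespace).Patch(
		ctx, name, types.MergePatchType, patchBytes, metav1.PatchOptions{})
	return err
}
```

Sending a merge patch keeps each request to the changed fields only, which is presumably why the shared cloudevents-backed integration tests favor `Patch` over full-object `Update` here.
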
diff --git a/vendor/cloud.google.com/go/compute/metadata/README.md b/vendor/cloud.google.com/go/compute/metadata/README.md new file mode 100644 index 000000000..f940fb2c8 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/README.md @@ -0,0 +1,27 @@ +# Compute API + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata) + +This is a utility library for communicating with Google Cloud metadata service +on Google Cloud. + +## Install + +```bash +go get cloud.google.com/go/compute/metadata +``` + +## Go Version Support + +See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) +section in the root directory's README. + +## Contributing + +Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. See +[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 000000000..f67e3c7ee --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,579 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://cloud.google.com/compute/docs/metadata/overview. +package metadata // import "cloud.google.com/go/compute/metadata" + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This is variable name is not defined by any spec, as far as + // I know; it was made up for the Go package. 
+ metadataHostEnv = "GCE_METADATA_HOST" + + userAgent = "gcloud-golang/0.1" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var defaultClient = &Client{hc: newDefaultHTTPClient()} + +func newDefaultHTTPClient() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + IdleConnTimeout: 60 * time.Second, + }, + Timeout: 5 * time.Second, + } +} + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +func (c *cachedValue) get(cl *Client) (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = cl.getTrimmed(context.Background(), c.k) + } else { + v, err = cl.GetWithContext(context.Background(), c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/googleapis/google-cloud-go/issues/194 + go func() { + req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) + req.Header.Set("User-Agent", userAgent) + res, err := newDefaultHTTPClient().Do(req.WithContext(ctx)) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + resolver := &net.Resolver{} + addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. 
Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. + return false + } + slurp, _ := os.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(slurp)) + return name == "Google" || name == "Google Compute Engine" +} + +// Subscribe calls Client.SubscribeWithContext on the default client. +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) }) +} + +// SubscribeWithContext calls Client.SubscribeWithContext on the default client. +func SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error { + return defaultClient.SubscribeWithContext(ctx, suffix, fn) +} + +// Get calls Client.GetWithContext on the default client. +// +// Deprecated: Please use the context aware variant [GetWithContext]. +func Get(suffix string) (string, error) { + return defaultClient.GetWithContext(context.Background(), suffix) +} + +// GetWithContext calls Client.GetWithContext on the default client. +func GetWithContext(ctx context.Context, suffix string) (string, error) { + return defaultClient.GetWithContext(ctx, suffix) +} + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return defaultClient.ProjectID() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { return defaultClient.InternalIP() } + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { return defaultClient.ExternalIP() } + +// Email calls Client.Email on the default client. +func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) } + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func Hostname() (string, error) { return defaultClient.Hostname() } + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { return defaultClient.InstanceID() } + +// InstanceName returns the current VM's instance ID string. +func InstanceName() (string, error) { return defaultClient.InstanceName() } + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { return defaultClient.Zone() } + +// InstanceAttributes calls Client.InstanceAttributes on the default client. +func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } + +// ProjectAttributes calls Client.ProjectAttributes on the default client. 
+func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } + +// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +func InstanceAttributeValue(attr string) (string, error) { + return defaultClient.InstanceAttributeValue(attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +func ProjectAttributeValue(attr string) (string, error) { + return defaultClient.ProjectAttributeValue(attr) +} + +// Scopes calls Client.Scopes on the default client. +func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +// A Client provides metadata. +type Client struct { + hc *http.Client +} + +// NewClient returns a Client that can be used to fetch metadata. +// Returns the client that uses the specified http.Client for HTTP requests. +// If nil is specified, returns the default client. +func NewClient(c *http.Client) *Client { + if c == nil { + return defaultClient + } + + return &Client{hc: c} +} + +// getETag returns a value from the metadata service as well as the associated ETag. +// This func is otherwise equivalent to Get. +func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = metadataIP + } + suffix = strings.TrimLeft(suffix, "/") + u := "http://" + host + "/computeMetadata/v1/" + suffix + req, err := http.NewRequestWithContext(ctx, "GET", u, nil) + if err != nil { + return "", "", err + } + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + var res *http.Response + var reqErr error + retryer := newRetryer() + for { + res, reqErr = c.hc.Do(req) + var code int + if res != nil { + code = res.StatusCode + } + if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { + if err := sleep(ctx, delay); err != nil { + return "", "", err + } + continue + } + break + } + if reqErr != nil { + return "", "", reqErr + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + all, err := io.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if res.StatusCode != 200 { + return "", "", &Error{Code: res.StatusCode, Message: string(all)} + } + return string(all), res.Header.Get("Etag"), nil +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +// +// Deprecated: Please use the context aware variant [Client.GetWithContext]. 
+func (c *Client) Get(suffix string) (string, error) { + return c.GetWithContext(context.Background(), suffix) +} + +// GetWithContext returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) { + val, _, err := c.getETag(ctx, suffix) + return val, err +} + +func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err error) { + s, err = c.GetWithContext(ctx, suffix) + s = strings.TrimSpace(s) + return +} + +func (c *Client) lines(suffix string) ([]string, error) { + j, err := c.GetWithContext(context.Background(), suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// ProjectID returns the current instance's project ID string. +func (c *Client) ProjectID() (string, error) { return projID.get(c) } + +// NumericProjectID returns the current instance's numeric project ID. +func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } + +// InstanceID returns the current VM's numeric instance ID. +func (c *Client) InstanceID() (string, error) { return instID.get(c) } + +// InternalIP returns the instance's primary internal IP address. +func (c *Client) InternalIP() (string, error) { + return c.getTrimmed(context.Background(), "instance/network-interfaces/0/ip") +} + +// Email returns the email address associated with the service account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) Email(serviceAccount string) (string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return c.getTrimmed(context.Background(), "instance/service-accounts/"+serviceAccount+"/email") +} + +// ExternalIP returns the instance's primary external (public) IP address. +func (c *Client) ExternalIP() (string, error) { + return c.getTrimmed(context.Background(), "instance/network-interfaces/0/access-configs/0/external-ip") +} + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func (c *Client) Hostname() (string, error) { + return c.getTrimmed(context.Background(), "instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func (c *Client) InstanceTags() ([]string, error) { + var s []string + j, err := c.GetWithContext(context.Background(), "instance/tags") + if err != nil { + return nil, err + } + if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { + return nil, err + } + return s, nil +} + +// InstanceName returns the current VM's instance ID string. +func (c *Client) InstanceName() (string, error) { + return c.getTrimmed(context.Background(), "instance/name") +} + +// Zone returns the current VM's zone, such as "us-central1-b". +func (c *Client) Zone() (string, error) { + zone, err := c.getTrimmed(context.Background(), "instance/zone") + // zone is of the form "projects//zones/". 
+ if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) InstanceAttributeValue(attr string) (string, error) { + return c.GetWithContext(context.Background(), "instance/attributes/"+attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValue(attr string) (string, error) { + return c.GetWithContext(context.Background(), "project/attributes/"+attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// Deprecated: Please use the context aware variant [Client.SubscribeWithContext]. +func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { + return c.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) }) +} + +// SubscribeWithContext subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// SubscribeWithContext calls fn with the latest metadata value indicated by the +// provided suffix. If the metadata value is deleted, fn is called with the +// empty string and ok false. Subscribe blocks until fn returns a non-nil error +// or the value is deleted. Subscribe returns the error value returned from the +// last call to fn, which may be nil when ok == false. +func (c *Client) SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. 
+ val, lastETag, err := c.getETag(ctx, suffix) + if err != nil { + return err + } + + if err := fn(ctx, val, true); err != nil { + return err + } + + ok := true + if strings.ContainsRune(suffix, '?') { + suffix += "&wait_for_change=true&last_etag=" + } else { + suffix += "?wait_for_change=true&last_etag=" + } + for { + val, etag, err := c.getETag(ctx, suffix+url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. + } + ok = false + } + lastETag = etag + + if err := fn(ctx, val, ok); err != nil || !ok { + return err + } + } +} + +// Error contains an error response from the server. +type Error struct { + // Code is the HTTP response status code. + Code int + // Message is the server response message. + Message string +} + +func (e *Error) Error() string { + return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) +} diff --git a/vendor/cloud.google.com/go/compute/metadata/retry.go b/vendor/cloud.google.com/go/compute/metadata/retry.go new file mode 100644 index 000000000..3d4bc75dd --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/retry.go @@ -0,0 +1,114 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "context" + "io" + "math/rand" + "net/http" + "time" +) + +const ( + maxRetryAttempts = 5 +) + +var ( + syscallRetryable = func(error) bool { return false } +) + +// defaultBackoff is basically equivalent to gax.Backoff without the need for +// the dependency. +type defaultBackoff struct { + max time.Duration + mul float64 + cur time.Duration +} + +func (b *defaultBackoff) Pause() time.Duration { + d := time.Duration(1 + rand.Int63n(int64(b.cur))) + b.cur = time.Duration(float64(b.cur) * b.mul) + if b.cur > b.max { + b.cur = b.max + } + return d +} + +// sleep is the equivalent of gax.Sleep without the need for the dependency. +func sleep(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } +} + +func newRetryer() *metadataRetryer { + return &metadataRetryer{bo: &defaultBackoff{ + cur: 100 * time.Millisecond, + max: 30 * time.Second, + mul: 2, + }} +} + +type backoff interface { + Pause() time.Duration +} + +type metadataRetryer struct { + bo backoff + attempts int +} + +func (r *metadataRetryer) Retry(status int, err error) (time.Duration, bool) { + if status == http.StatusOK { + return 0, false + } + retryOk := shouldRetry(status, err) + if !retryOk { + return 0, false + } + if r.attempts == maxRetryAttempts { + return 0, false + } + r.attempts++ + return r.bo.Pause(), true +} + +func shouldRetry(status int, err error) bool { + if 500 <= status && status <= 599 { + return true + } + if err == io.ErrUnexpectedEOF { + return true + } + // Transient network errors should be retried. 
+ if syscallRetryable(err) { + return true + } + if err, ok := err.(interface{ Temporary() bool }); ok { + if err.Temporary() { + return true + } + } + if err, ok := err.(interface{ Unwrap() error }); ok { + return shouldRetry(status, err.Unwrap()) + } + return false +} diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go new file mode 100644 index 000000000..bb412f891 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go @@ -0,0 +1,26 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux +// +build linux + +package metadata + +import "syscall" + +func init() { + // Initialize syscallRetryable to return true on transient socket-level + // errors. These errors are specific to Linux. + syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } +} diff --git a/vendor/golang.org/x/oauth2/authhandler/authhandler.go b/vendor/golang.org/x/oauth2/authhandler/authhandler.go new file mode 100644 index 000000000..9bc6cd7bc --- /dev/null +++ b/vendor/golang.org/x/oauth2/authhandler/authhandler.go @@ -0,0 +1,94 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package authhandler implements a TokenSource to support +// "three-legged OAuth 2.0" via a custom AuthorizationHandler. +package authhandler + +import ( + "context" + "errors" + + "golang.org/x/oauth2" +) + +const ( + // Parameter keys for AuthCodeURL method to support PKCE. + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + + // Parameter key for Exchange method to support PKCE. + codeVerifierKey = "code_verifier" +) + +// PKCEParams holds parameters to support PKCE. +type PKCEParams struct { + Challenge string // The unpadded, base64-url-encoded string of the encrypted code verifier. + ChallengeMethod string // The encryption method (ex. S256). + Verifier string // The original, non-encrypted secret. +} + +// AuthorizationHandler is a 3-legged-OAuth helper that prompts +// the user for OAuth consent at the specified auth code URL +// and returns an auth code and state upon approval. +type AuthorizationHandler func(authCodeURL string) (code string, state string, err error) + +// TokenSourceWithPKCE is an enhanced version of TokenSource with PKCE support. +// +// The pkce parameter supports PKCE flow, which uses code challenge and code verifier +// to prevent CSRF attacks. A unique code challenge and code verifier should be generated +// by the caller at runtime. See https://www.oauth.com/oauth2-servers/pkce/ for more info. 
+func TokenSourceWithPKCE(ctx context.Context, config *oauth2.Config, state string, authHandler AuthorizationHandler, pkce *PKCEParams) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, authHandlerSource{config: config, ctx: ctx, authHandler: authHandler, state: state, pkce: pkce}) +} + +// TokenSource returns an oauth2.TokenSource that fetches access tokens +// using 3-legged-OAuth flow. +// +// The provided context.Context is used for oauth2 Exchange operation. +// +// The provided oauth2.Config should be a full configuration containing AuthURL, +// TokenURL, and Scope. +// +// An environment-specific AuthorizationHandler is used to obtain user consent. +// +// Per the OAuth protocol, a unique "state" string should be specified here. +// This token source will verify that the "state" is identical in the request +// and response before exchanging the auth code for OAuth token to prevent CSRF +// attacks. +func TokenSource(ctx context.Context, config *oauth2.Config, state string, authHandler AuthorizationHandler) oauth2.TokenSource { + return TokenSourceWithPKCE(ctx, config, state, authHandler, nil) +} + +type authHandlerSource struct { + ctx context.Context + config *oauth2.Config + authHandler AuthorizationHandler + state string + pkce *PKCEParams +} + +func (source authHandlerSource) Token() (*oauth2.Token, error) { + // Step 1: Obtain auth code. + var authCodeUrlOptions []oauth2.AuthCodeOption + if source.pkce != nil && source.pkce.Challenge != "" && source.pkce.ChallengeMethod != "" { + authCodeUrlOptions = []oauth2.AuthCodeOption{oauth2.SetAuthURLParam(codeChallengeKey, source.pkce.Challenge), + oauth2.SetAuthURLParam(codeChallengeMethodKey, source.pkce.ChallengeMethod)} + } + url := source.config.AuthCodeURL(source.state, authCodeUrlOptions...) + code, state, err := source.authHandler(url) + if err != nil { + return nil, err + } + if state != source.state { + return nil, errors.New("state mismatch in 3-legged-OAuth flow") + } + + // Step 2: Exchange auth code for access token. + var exchangeOptions []oauth2.AuthCodeOption + if source.pkce != nil && source.pkce.Verifier != "" { + exchangeOptions = []oauth2.AuthCodeOption{oauth2.SetAuthURLParam(codeVerifierKey, source.pkce.Verifier)} + } + return source.config.Exchange(source.ctx, code, exchangeOptions...) +} diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go new file mode 100644 index 000000000..564920bd4 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -0,0 +1,40 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "log" + "sync" + + "golang.org/x/oauth2" +) + +var logOnce sync.Once // only spam about deprecation once + +// AppEngineTokenSource returns a token source that fetches tokens from either +// the current application's service account or from the metadata server, +// depending on the App Engine environment. See below for environment-specific +// details. If you are implementing a 3-legged OAuth 2.0 flow on App Engine that +// involves user accounts, see oauth2.Config instead. +// +// The current version of this library requires at least Go 1.17 to build, +// so first generation App Engine runtimes (<= Go 1.9) are unsupported. 
+// Previously, on first generation App Engine runtimes, AppEngineTokenSource +// returned a token source that fetches tokens issued to the +// current App Engine application's service account. The provided context must have +// come from appengine.NewContext. +// +// Second generation App Engine runtimes (>= Go 1.11) and App Engine flexible: +// AppEngineTokenSource is DEPRECATED on second generation runtimes and on the +// flexible environment. It delegates to ComputeTokenSource, and the provided +// context and scopes are not used. Please use DefaultTokenSource (or ComputeTokenSource, +// which DefaultTokenSource will use in this case) instead. +func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + logOnce.Do(func() { + log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. Please use DefaultTokenSource or ComputeTokenSource.") + }) + return ComputeTokenSource("") +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go new file mode 100644 index 000000000..df958359a --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -0,0 +1,317 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "os" + "path/filepath" + "runtime" + "sync" + "time" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/oauth2" + "golang.org/x/oauth2/authhandler" +) + +const ( + adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" + defaultUniverseDomain = "googleapis.com" +) + +// Credentials holds Google credentials, including "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +// Credentials from external accounts (workload identity federation) are used to +// identify a particular application from an on-prem or non-Google Cloud platform +// including Amazon Web Services (AWS), Microsoft Azure or any identity provider +// that supports OpenID Connect (OIDC). +type Credentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource + + // JSON contains the raw bytes from a JSON credentials file. + // This field may be nil if authentication is provided by the + // environment and not with a credentials file, e.g. when code is + // running on Google Cloud Platform. + JSON []byte + + // UniverseDomainProvider returns the default service domain for a given + // Cloud universe. Optional. + // + // On GCE, UniverseDomainProvider should return the universe domain value + // from Google Compute Engine (GCE)'s metadata server. See also [The attached service + // account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). + // If the GCE metadata server returns a 404 error, the default universe + // domain value should be returned. If the GCE metadata server returns an + // error other than 404, the error should be returned. + UniverseDomainProvider func() (string, error) + + udMu sync.Mutex // guards universeDomain + // universeDomain is the default service domain for a given Cloud universe. + universeDomain string +} + +// UniverseDomain returns the default service domain for a given Cloud universe. +// +// The default value is "googleapis.com". 
+// +// Deprecated: Use instead (*Credentials).GetUniverseDomain(), which supports +// obtaining the universe domain when authenticating via the GCE metadata server. +// Unlike GetUniverseDomain, this method, UniverseDomain, will always return the +// default value when authenticating via the GCE metadata server. +// See also [The attached service account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). +func (c *Credentials) UniverseDomain() string { + if c.universeDomain == "" { + return defaultUniverseDomain + } + return c.universeDomain +} + +// GetUniverseDomain returns the default service domain for a given Cloud +// universe. If present, UniverseDomainProvider will be invoked and its return +// value will be cached. +// +// The default value is "googleapis.com". +func (c *Credentials) GetUniverseDomain() (string, error) { + c.udMu.Lock() + defer c.udMu.Unlock() + if c.universeDomain == "" && c.UniverseDomainProvider != nil { + // On Google Compute Engine, an App Engine standard second generation + // runtime, or App Engine flexible, use an externally provided function + // to request the universe domain from the metadata server. + ud, err := c.UniverseDomainProvider() + if err != nil { + return "", err + } + c.universeDomain = ud + } + // If no UniverseDomainProvider (meaning not on Google Compute Engine), or + // in case of any (non-error) empty return value from + // UniverseDomainProvider, set the default universe domain. + if c.universeDomain == "" { + c.universeDomain = defaultUniverseDomain + } + return c.universeDomain, nil +} + +// DefaultCredentials is the old name of Credentials. +// +// Deprecated: use Credentials instead. +type DefaultCredentials = Credentials + +// CredentialsParams holds user supplied parameters that are used together +// with a credentials file for building a Credentials object. +type CredentialsParams struct { + // Scopes is the list OAuth scopes. Required. + // Example: https://www.googleapis.com/auth/cloud-platform + Scopes []string + + // Subject is the user email used for domain wide delegation (see + // https://developers.google.com/identity/protocols/oauth2/service-account#delegatingauthority). + // Optional. + Subject string + + // AuthHandler is the AuthorizationHandler used for 3-legged OAuth flow. Required for 3LO flow. + AuthHandler authhandler.AuthorizationHandler + + // State is a unique string used with AuthHandler. Required for 3LO flow. + State string + + // PKCE is used to support PKCE flow. Optional for 3LO flow. + PKCE *authhandler.PKCEParams + + // The OAuth2 TokenURL default override. This value overrides the default TokenURL, + // unless explicitly specified by the credentials config file. Optional. + TokenURL string + + // EarlyTokenRefresh is the amount of time before a token expires that a new + // token will be preemptively fetched. If unset the default value is 10 + // seconds. + // + // Note: This option is currently only respected when using credentials + // fetched from the GCE metadata server. + EarlyTokenRefresh time.Duration + + // UniverseDomain is the default service domain for a given Cloud universe. + // Only supported in authentication flows that support universe domains. + // This value takes precedence over a universe domain explicitly specified + // in a credentials config file or by the GCE metadata server. Optional. 
+ UniverseDomain string +} + +func (params CredentialsParams) deepCopy() CredentialsParams { + paramsCopy := params + paramsCopy.Scopes = make([]string, len(params.Scopes)) + copy(paramsCopy.Scopes, params.Scopes) + return paramsCopy +} + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource returns the token source for +// "Application Default Credentials". +// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + creds, err := FindDefaultCredentials(ctx, scope...) + if err != nil { + return nil, err + } + return creds.TokenSource, nil +} + +// FindDefaultCredentialsWithParams searches for "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// For workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on +// how to generate the JSON configuration file for on-prem/non-Google cloud +// platforms. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google Compute Engine, Google App Engine standard second generation runtimes +// (>= Go 1.11), and Google App Engine flexible environment, it fetches +// credentials from the metadata server. +func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsParams) (*Credentials, error) { + // Make defensive copy of the slices in params. + params = params.deepCopy() + + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + creds, err := readCredentialsFile(ctx, filename, params) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return creds, nil + } + + // Second, try a well-known file. + filename := wellKnownFile() + if b, err := os.ReadFile(filename); err == nil { + return CredentialsFromJSONWithParams(ctx, b, params) + } + + // Third, if we're on Google Compute Engine, an App Engine standard second generation runtime, + // or App Engine flexible, use the metadata server. + if metadata.OnGCE() { + id, _ := metadata.ProjectID() + universeDomainProvider := func() (string, error) { + universeDomain, err := metadata.Get("universe/universe_domain") + if err != nil { + if _, ok := err.(metadata.NotDefinedError); ok { + // http.StatusNotFound (404) + return defaultUniverseDomain, nil + } else { + return "", err + } + } + return universeDomain, nil + } + return &Credentials{ + ProjectID: id, + TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), + UniverseDomainProvider: universeDomainProvider, + universeDomain: params.UniverseDomain, + }, nil + } + + // None are found; return helpful error. + return nil, fmt.Errorf("google: could not find default credentials. 
See %v for more information", adcSetupURL) +} + +// FindDefaultCredentials invokes FindDefaultCredentialsWithParams with the specified scopes. +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { + var params CredentialsParams + params.Scopes = scopes + return FindDefaultCredentialsWithParams(ctx, params) +} + +// CredentialsFromJSONWithParams obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in ConfigFromJSON), +// a Google Developers service account key file, a gcloud user credentials file (a.k.a. refresh +// token JSON), or the JSON configuration file for workload identity federation in non-Google cloud +// platforms (see https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation). +func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params CredentialsParams) (*Credentials, error) { + // Make defensive copy of the slices in params. + params = params.deepCopy() + + // First, attempt to parse jsonData as a Google Developers Console client_credentials.json. + config, _ := ConfigFromJSON(jsonData, params.Scopes...) + if config != nil { + return &Credentials{ + ProjectID: "", + TokenSource: authhandler.TokenSourceWithPKCE(ctx, config, params.State, params.AuthHandler, params.PKCE), + JSON: jsonData, + }, nil + } + + // Otherwise, parse jsonData as one of the other supported credentials files. + var f credentialsFile + if err := json.Unmarshal(jsonData, &f); err != nil { + return nil, err + } + + universeDomain := f.UniverseDomain + if params.UniverseDomain != "" { + universeDomain = params.UniverseDomain + } + // Authorized user credentials are only supported in the googleapis.com universe. + if f.Type == userCredentialsKey { + universeDomain = defaultUniverseDomain + } + + ts, err := f.tokenSource(ctx, params) + if err != nil { + return nil, err + } + ts = newErrWrappingTokenSource(ts) + return &Credentials{ + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: jsonData, + universeDomain: universeDomain, + }, nil +} + +// CredentialsFromJSON invokes CredentialsFromJSONWithParams with the specified scopes. +func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { + var params CredentialsParams + params.Scopes = scopes + return CredentialsFromJSONWithParams(ctx, jsonData, params) +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*Credentials, error) { + b, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + return CredentialsFromJSONWithParams(ctx, b, params) +} diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go new file mode 100644 index 000000000..830d268c1 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -0,0 +1,53 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package google provides support for making OAuth2 authorized and authenticated +// HTTP requests to Google APIs. 
It supports the Web server flow, client-side +// credentials, service accounts, Google Compute Engine service accounts, +// Google App Engine service accounts and workload identity federation +// from non-Google cloud platforms. +// +// A brief overview of the package follows. For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// For more information on using workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation. +// +// # OAuth2 Configs +// +// Two functions in this package return golang.org/x/oauth2.Config values from Google credential +// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, +// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or +// create an http.Client. +// +// # Workload and Workforce Identity Federation +// +// For information on how to use Workload and Workforce Identity Federation, see [golang.org/x/oauth2/google/externalaccount]. +// +// # Credentials +// +// The Credentials type represents Google credentials, including Application Default +// Credentials. +// +// Use FindDefaultCredentials to obtain Application Default Credentials. +// FindDefaultCredentials looks in some well-known places for a credentials file, and +// will call AppEngineTokenSource or ComputeTokenSource as needed. +// +// Application Default Credentials also support workload identity federation to +// access Google Cloud resources from non-Google Cloud platforms including Amazon +// Web Services (AWS), Microsoft Azure or any identity provider that supports +// OpenID Connect (OIDC). Workload identity federation is recommended for +// non-Google Cloud environments as it avoids the need to download, manage and +// store service account private keys locally. +// +// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, +// then use the credentials to construct an http.Client or an oauth2.TokenSource. +// +// Use CredentialsFromJSON to obtain credentials from either of the two JSON formats +// described in OAuth2 Configs, above. The TokenSource in the returned value is the +// same as the one obtained from the oauth2.Config returned from ConfigFromJSON or +// JWTConfigFromJSON, but the Credentials may contain additional information +// that is useful is some circumstances. +package google // import "golang.org/x/oauth2/google" diff --git a/vendor/golang.org/x/oauth2/google/error.go b/vendor/golang.org/x/oauth2/google/error.go new file mode 100644 index 000000000..d84dd0047 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/error.go @@ -0,0 +1,64 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "errors" + + "golang.org/x/oauth2" +) + +// AuthenticationError indicates there was an error in the authentication flow. +// +// Use (*AuthenticationError).Temporary to check if the error can be retried. 
+type AuthenticationError struct { + err *oauth2.RetrieveError +} + +func newAuthenticationError(err error) error { + re := &oauth2.RetrieveError{} + if !errors.As(err, &re) { + return err + } + return &AuthenticationError{ + err: re, + } +} + +// Temporary indicates that the network error has one of the following status codes and may be retried: 500, 503, 408, or 429. +func (e *AuthenticationError) Temporary() bool { + if e.err.Response == nil { + return false + } + sc := e.err.Response.StatusCode + return sc == 500 || sc == 503 || sc == 408 || sc == 429 +} + +func (e *AuthenticationError) Error() string { + return e.err.Error() +} + +func (e *AuthenticationError) Unwrap() error { + return e.err +} + +type errWrappingTokenSource struct { + src oauth2.TokenSource +} + +func newErrWrappingTokenSource(ts oauth2.TokenSource) oauth2.TokenSource { + return &errWrappingTokenSource{src: ts} +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. +func (s *errWrappingTokenSource) Token() (*oauth2.Token, error) { + t, err := s.src.Token() + if err != nil { + return nil, newAuthenticationError(err) + } + return t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go new file mode 100644 index 000000000..ca27c2e98 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go @@ -0,0 +1,577 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path" + "sort" + "strings" + "time" + + "golang.org/x/oauth2" +) + +// AwsSecurityCredentials models AWS security credentials. +type AwsSecurityCredentials struct { + // AccessKeyId is the AWS Access Key ID - Required. + AccessKeyID string `json:"AccessKeyID"` + // SecretAccessKey is the AWS Secret Access Key - Required. + SecretAccessKey string `json:"SecretAccessKey"` + // SessionToken is the AWS Session token. This should be provided for temporary AWS security credentials - Optional. + SessionToken string `json:"Token"` +} + +// awsRequestSigner is a utility class to sign http requests using a AWS V4 signature. +type awsRequestSigner struct { + RegionName string + AwsSecurityCredentials *AwsSecurityCredentials +} + +// getenv aliases os.Getenv for testing +var getenv = os.Getenv + +const ( + defaultRegionalCredentialVerificationUrl = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" + + // AWS Signature Version 4 signing algorithm identifier. + awsAlgorithm = "AWS4-HMAC-SHA256" + + // The termination string for the AWS credential scope value as defined in + // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html + awsRequestType = "aws4_request" + + // The AWS authorization header name for the security session token if available. 
+ awsSecurityTokenHeader = "x-amz-security-token" + + // The name of the header containing the session token for metadata endpoint calls + awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token" + + awsIMDSv2SessionTtlHeader = "X-aws-ec2-metadata-token-ttl-seconds" + + awsIMDSv2SessionTtl = "300" + + // The AWS authorization header name for the auto-generated date. + awsDateHeader = "x-amz-date" + + // Supported AWS configuration environment variables. + awsAccessKeyId = "AWS_ACCESS_KEY_ID" + awsDefaultRegion = "AWS_DEFAULT_REGION" + awsRegion = "AWS_REGION" + awsSecretAccessKey = "AWS_SECRET_ACCESS_KEY" + awsSessionToken = "AWS_SESSION_TOKEN" + + awsTimeFormatLong = "20060102T150405Z" + awsTimeFormatShort = "20060102" +) + +func getSha256(input []byte) (string, error) { + hash := sha256.New() + if _, err := hash.Write(input); err != nil { + return "", err + } + return hex.EncodeToString(hash.Sum(nil)), nil +} + +func getHmacSha256(key, input []byte) ([]byte, error) { + hash := hmac.New(sha256.New, key) + if _, err := hash.Write(input); err != nil { + return nil, err + } + return hash.Sum(nil), nil +} + +func cloneRequest(r *http.Request) *http.Request { + r2 := new(http.Request) + *r2 = *r + if r.Header != nil { + r2.Header = make(http.Header, len(r.Header)) + + // Find total number of values. + headerCount := 0 + for _, headerValues := range r.Header { + headerCount += len(headerValues) + } + copiedHeaders := make([]string, headerCount) // shared backing array for headers' values + + for headerKey, headerValues := range r.Header { + headerCount = copy(copiedHeaders, headerValues) + r2.Header[headerKey] = copiedHeaders[:headerCount:headerCount] + copiedHeaders = copiedHeaders[headerCount:] + } + } + return r2 +} + +func canonicalPath(req *http.Request) string { + result := req.URL.EscapedPath() + if result == "" { + return "/" + } + return path.Clean(result) +} + +func canonicalQuery(req *http.Request) string { + queryValues := req.URL.Query() + for queryKey := range queryValues { + sort.Strings(queryValues[queryKey]) + } + return queryValues.Encode() +} + +func canonicalHeaders(req *http.Request) (string, string) { + // Header keys need to be sorted alphabetically. + var headers []string + lowerCaseHeaders := make(http.Header) + for k, v := range req.Header { + k := strings.ToLower(k) + if _, ok := lowerCaseHeaders[k]; ok { + // include additional values + lowerCaseHeaders[k] = append(lowerCaseHeaders[k], v...) 
+ } else { + headers = append(headers, k) + lowerCaseHeaders[k] = v + } + } + sort.Strings(headers) + + var fullHeaders bytes.Buffer + for _, header := range headers { + headerValue := strings.Join(lowerCaseHeaders[header], ",") + fullHeaders.WriteString(header) + fullHeaders.WriteRune(':') + fullHeaders.WriteString(headerValue) + fullHeaders.WriteRune('\n') + } + + return strings.Join(headers, ";"), fullHeaders.String() +} + +func requestDataHash(req *http.Request) (string, error) { + var requestData []byte + if req.Body != nil { + requestBody, err := req.GetBody() + if err != nil { + return "", err + } + defer requestBody.Close() + + requestData, err = ioutil.ReadAll(io.LimitReader(requestBody, 1<<20)) + if err != nil { + return "", err + } + } + + return getSha256(requestData) +} + +func requestHost(req *http.Request) string { + if req.Host != "" { + return req.Host + } + return req.URL.Host +} + +func canonicalRequest(req *http.Request, canonicalHeaderColumns, canonicalHeaderData string) (string, error) { + dataHash, err := requestDataHash(req) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", req.Method, canonicalPath(req), canonicalQuery(req), canonicalHeaderData, canonicalHeaderColumns, dataHash), nil +} + +// SignRequest adds the appropriate headers to an http.Request +// or returns an error if something prevented this. +func (rs *awsRequestSigner) SignRequest(req *http.Request) error { + signedRequest := cloneRequest(req) + timestamp := now() + + signedRequest.Header.Add("host", requestHost(req)) + + if rs.AwsSecurityCredentials.SessionToken != "" { + signedRequest.Header.Add(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken) + } + + if signedRequest.Header.Get("date") == "" { + signedRequest.Header.Add(awsDateHeader, timestamp.Format(awsTimeFormatLong)) + } + + authorizationCode, err := rs.generateAuthentication(signedRequest, timestamp) + if err != nil { + return err + } + signedRequest.Header.Set("Authorization", authorizationCode) + + req.Header = signedRequest.Header + return nil +} + +func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) { + canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req) + + dateStamp := timestamp.Format(awsTimeFormatShort) + serviceName := "" + if splitHost := strings.Split(requestHost(req), "."); len(splitHost) > 0 { + serviceName = splitHost[0] + } + + credentialScope := fmt.Sprintf("%s/%s/%s/%s", dateStamp, rs.RegionName, serviceName, awsRequestType) + + requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData) + if err != nil { + return "", err + } + requestHash, err := getSha256([]byte(requestString)) + if err != nil { + return "", err + } + + stringToSign := fmt.Sprintf("%s\n%s\n%s\n%s", awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash) + + signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey) + for _, signingInput := range []string{ + dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign, + } { + signingKey, err = getHmacSha256(signingKey, []byte(signingInput)) + if err != nil { + return "", err + } + } + + return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil +} + +type awsCredentialSource struct { + environmentID string + regionURL string + regionalCredVerificationURL string + 
credVerificationURL string + imdsv2SessionTokenURL string + targetResource string + requestSigner *awsRequestSigner + region string + ctx context.Context + client *http.Client + awsSecurityCredentialsSupplier AwsSecurityCredentialsSupplier + supplierOptions SupplierOptions +} + +type awsRequestHeader struct { + Key string `json:"key"` + Value string `json:"value"` +} + +type awsRequest struct { + URL string `json:"url"` + Method string `json:"method"` + Headers []awsRequestHeader `json:"headers"` +} + +func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { + if cs.client == nil { + cs.client = oauth2.NewClient(cs.ctx, nil) + } + return cs.client.Do(req.WithContext(cs.ctx)) +} + +func canRetrieveRegionFromEnvironment() bool { + // The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is + // required. + return getenv(awsRegion) != "" || getenv(awsDefaultRegion) != "" +} + +func canRetrieveSecurityCredentialFromEnvironment() bool { + // Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available. + return getenv(awsAccessKeyId) != "" && getenv(awsSecretAccessKey) != "" +} + +func (cs awsCredentialSource) shouldUseMetadataServer() bool { + return cs.awsSecurityCredentialsSupplier == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment()) +} + +func (cs awsCredentialSource) credentialSourceType() string { + if cs.awsSecurityCredentialsSupplier != nil { + return "programmatic" + } + return "aws" +} + +func (cs awsCredentialSource) subjectToken() (string, error) { + // Set Defaults + if cs.regionalCredVerificationURL == "" { + cs.regionalCredVerificationURL = defaultRegionalCredentialVerificationUrl + } + if cs.requestSigner == nil { + headers := make(map[string]string) + if cs.shouldUseMetadataServer() { + awsSessionToken, err := cs.getAWSSessionToken() + if err != nil { + return "", err + } + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + } + } + + awsSecurityCredentials, err := cs.getSecurityCredentials(headers) + if err != nil { + return "", err + } + cs.region, err = cs.getRegion(headers) + if err != nil { + return "", err + } + + cs.requestSigner = &awsRequestSigner{ + RegionName: cs.region, + AwsSecurityCredentials: awsSecurityCredentials, + } + } + + // Generate the signed request to AWS STS GetCallerIdentity API. + // Use the required regional endpoint. Otherwise, the request will fail. + req, err := http.NewRequest("POST", strings.Replace(cs.regionalCredVerificationURL, "{region}", cs.region, 1), nil) + if err != nil { + return "", err + } + // The full, canonical resource name of the workload identity pool + // provider, with or without the HTTPS prefix. + // Including this header as part of the signature is recommended to + // ensure data integrity. + if cs.targetResource != "" { + req.Header.Add("x-goog-cloud-target-resource", cs.targetResource) + } + cs.requestSigner.SignRequest(req) + + /* + The GCP STS endpoint expects the headers to be formatted as: + # [ + # {key: 'x-amz-date', value: '...'}, + # {key: 'Authorization', value: '...'}, + # ... + # ] + # And then serialized as: + # quote(json.dumps({ + # url: '...', + # method: 'POST', + # headers: [{key: 'x-amz-date', value: '...'}, ...] 
+ # })) + */ + + awsSignedReq := awsRequest{ + URL: req.URL.String(), + Method: "POST", + } + for headerKey, headerList := range req.Header { + for _, headerValue := range headerList { + awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{ + Key: headerKey, + Value: headerValue, + }) + } + } + sort.Slice(awsSignedReq.Headers, func(i, j int) bool { + headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key) + if headerCompare == 0 { + return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0 + } + return headerCompare < 0 + }) + + result, err := json.Marshal(awsSignedReq) + if err != nil { + return "", err + } + return url.QueryEscape(string(result)), nil +} + +func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { + if cs.imdsv2SessionTokenURL == "" { + return "", nil + } + + req, err := http.NewRequest("PUT", cs.imdsv2SessionTokenURL, nil) + if err != nil { + return "", err + } + + req.Header.Add(awsIMDSv2SessionTtlHeader, awsIMDSv2SessionTtl) + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS session token - %s", string(respBody)) + } + + return string(respBody), nil +} + +func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) { + if cs.awsSecurityCredentialsSupplier != nil { + return cs.awsSecurityCredentialsSupplier.AwsRegion(cs.ctx, cs.supplierOptions) + } + if canRetrieveRegionFromEnvironment() { + if envAwsRegion := getenv(awsRegion); envAwsRegion != "" { + cs.region = envAwsRegion + return envAwsRegion, nil + } + return getenv("AWS_DEFAULT_REGION"), nil + } + + if cs.regionURL == "" { + return "", errors.New("oauth2/google/externalaccount: unable to determine AWS region") + } + + req, err := http.NewRequest("GET", cs.regionURL, nil) + if err != nil { + return "", err + } + + for name, value := range headers { + req.Header.Add(name, value) + } + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS region - %s", string(respBody)) + } + + // This endpoint will return the region in format: us-east-2b. + // Only the us-east-2 part should be used. 
+ respBodyEnd := 0 + if len(respBody) > 1 { + respBodyEnd = len(respBody) - 1 + } + return string(respBody[:respBodyEnd]), nil +} + +func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result *AwsSecurityCredentials, err error) { + if cs.awsSecurityCredentialsSupplier != nil { + return cs.awsSecurityCredentialsSupplier.AwsSecurityCredentials(cs.ctx, cs.supplierOptions) + } + if canRetrieveSecurityCredentialFromEnvironment() { + return &AwsSecurityCredentials{ + AccessKeyID: getenv(awsAccessKeyId), + SecretAccessKey: getenv(awsSecretAccessKey), + SessionToken: getenv(awsSessionToken), + }, nil + } + + roleName, err := cs.getMetadataRoleName(headers) + if err != nil { + return + } + + credentials, err := cs.getMetadataSecurityCredentials(roleName, headers) + if err != nil { + return + } + + if credentials.AccessKeyID == "" { + return result, errors.New("oauth2/google/externalaccount: missing AccessKeyId credential") + } + + if credentials.SecretAccessKey == "" { + return result, errors.New("oauth2/google/externalaccount: missing SecretAccessKey credential") + } + + return &credentials, nil +} + +func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (AwsSecurityCredentials, error) { + var result AwsSecurityCredentials + + req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.credVerificationURL, roleName), nil) + if err != nil { + return result, err + } + + for name, value := range headers { + req.Header.Add(name, value) + } + + resp, err := cs.doRequest(req) + if err != nil { + return result, err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return result, err + } + + if resp.StatusCode != 200 { + return result, fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS security credentials - %s", string(respBody)) + } + + err = json.Unmarshal(respBody, &result) + return result, err +} + +func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (string, error) { + if cs.credVerificationURL == "" { + return "", errors.New("oauth2/google/externalaccount: unable to determine the AWS metadata server security credentials endpoint") + } + + req, err := http.NewRequest("GET", cs.credVerificationURL, nil) + if err != nil { + return "", err + } + + for name, value := range headers { + req.Header.Add(name, value) + } + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS role name - %s", string(respBody)) + } + + return string(respBody), nil +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go new file mode 100644 index 000000000..6c81a6872 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go @@ -0,0 +1,485 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package externalaccount provides support for creating workload identity +federation and workforce identity federation token sources that can be +used to access Google Cloud resources from external identity providers. 
+ +# Workload Identity Federation + +Using workload identity federation, your application can access Google Cloud +resources from Amazon Web Services (AWS), Microsoft Azure or any identity +provider that supports OpenID Connect (OIDC) or SAML 2.0. +Traditionally, applications running outside Google Cloud have used service +account keys to access Google Cloud resources. Using identity federation, +you can allow your workload to impersonate a service account. +This lets you access Google Cloud resources directly, eliminating the +maintenance and security burden associated with service account keys. + +Follow the detailed instructions on how to configure Workload Identity Federation +in various platforms: + +Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws +Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure +OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc +SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml + +For OIDC and SAML providers, the library can retrieve tokens in four ways: +from a local file location (file-sourced credentials), from a server +(URL-sourced credentials), from a local executable (executable-sourced +credentials), or from a user defined function that returns an OIDC or SAML token. +For file-sourced credentials, a background process needs to be continuously +refreshing the file location with a new OIDC/SAML token prior to expiration. +For tokens with one hour lifetimes, the token needs to be updated in the file +every hour. The token can be stored directly as plain text or in JSON format. +For URL-sourced credentials, a local server needs to host a GET endpoint to +return the OIDC/SAML token. The response can be in plain text or JSON. +Additional required request headers can also be specified. +For executable-sourced credentials, an application needs to be available to +output the OIDC/SAML token and other information in a JSON format. +For more information on how these work (and how to implement +executable-sourced credentials), please check out: +https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration + +To use a custom function to supply the token, define a struct that implements the [SubjectTokenSupplier] interface for OIDC/SAML providers, +or one that implements [AwsSecurityCredentialsSupplier] for AWS providers. This can then be used when building a [Config]. +The [golang.org/x/oauth2.TokenSource] created from the config using [NewTokenSource] can then be used to access Google +Cloud resources. For instance, you can create a new client from the +[cloud.google.com/go/storage] package and pass in option.WithTokenSource(yourTokenSource) + +Note that this library does not perform any validation on the token_url, token_info_url, +or service_account_impersonation_url fields of the credential configuration. +It is not recommended to use a credential configuration that you did not generate with +the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. + +# Workforce Identity Federation + +Workforce identity federation lets you use an external identity provider (IdP) to +authenticate and authorize a workforce—a group of users, such as employees, partners, +and contractors—using IAM, so that the users can access Google Cloud services.
+Workforce identity federation extends Google Cloud's identity capabilities to support +syncless, attribute-based single sign on. + +With workforce identity federation, your workforce can access Google Cloud resources +using an external identity provider (IdP) that supports OpenID Connect (OIDC) or +SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation +Services (AD FS), Okta, and others. + +Follow the detailed instructions on how to configure Workforce Identity Federation +in various platforms: + +Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad +Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta +OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc +SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml + +For workforce identity federation, the library can retrieve tokens in four ways: +from a local file location (file-sourced credentials), from a server +(URL-sourced credentials), from a local executable (executable-sourced +credentials), or from a user supplied function that returns an OIDC or SAML token. +For file-sourced credentials, a background process needs to be continuously +refreshing the file location with a new OIDC/SAML token prior to expiration. +For tokens with one hour lifetimes, the token needs to be updated in the file +every hour. The token can be stored directly as plain text or in JSON format. +For URL-sourced credentials, a local server needs to host a GET endpoint to +return the OIDC/SAML token. The response can be in plain text or JSON. +Additional required request headers can also be specified. +For executable-sourced credentials, an application needs to be available to +output the OIDC/SAML token and other information in a JSON format. +For more information on how these work (and how to implement +executable-sourced credentials), please check out: +https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in + +To use a custom function to supply the token, define a struct that implements the [SubjectTokenSupplier] interface for OIDC/SAML providers. +This can then be used when building a [Config]. +The [golang.org/x/oauth2.TokenSource] created from the config using [NewTokenSource] can then be used to access Google +Cloud resources. For instance, you can create a new client from the +[cloud.google.com/go/storage] package and pass in option.WithTokenSource(yourTokenSource) + +# Security considerations + +Note that this library does not perform any validation on the token_url, token_info_url, +or service_account_impersonation_url fields of the credential configuration. +It is not recommended to use a credential configuration that you did not generate with +the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. +*/ +package externalaccount + +import ( + "context" + "fmt" + "net/http" + "regexp" + "strconv" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/impersonate" + "golang.org/x/oauth2/google/internal/stsexchange" +) + +const ( + universeDomainPlaceholder = "UNIVERSE_DOMAIN" + defaultTokenURL = "https://sts.UNIVERSE_DOMAIN/v1/token" + defaultUniverseDomain = "googleapis.com" +) + +// now aliases time.Now for testing +var now = func() time.Time { + return time.Now().UTC() +} + +// Config stores the configuration for fetching tokens with external credentials.
+type Config struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload + // identity pool or the workforce pool and the provider identifier in that pool. Required. + Audience string + // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec. + // Expected values include: + // “urn:ietf:params:oauth:token-type:jwt” + // “urn:ietf:params:oauth:token-type:id-token” + // “urn:ietf:params:oauth:token-type:saml2” + // “urn:ietf:params:aws:token-type:aws4_request” + // Required. + SubjectTokenType string + // TokenURL is the STS token exchange endpoint. If not provided, will default to + // https://sts.UNIVERSE_DOMAIN/v1/token, with UNIVERSE_DOMAIN set to the + // default service domain googleapis.com unless UniverseDomain is set. + // Optional. + TokenURL string + // TokenInfoURL is the token_info endpoint used to retrieve the account related information ( + // user attributes like account identifier, eg. email, username, uid, etc). This is + // needed for gCloud session account identification. Optional. + TokenInfoURL string + // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only + // required for workload identity pools when APIs to be accessed have not integrated with UberMint. Optional. + ServiceAccountImpersonationURL string + // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation + // token will be valid for. If not provided, it will default to 3600. Optional. + ServiceAccountImpersonationLifetimeSeconds int + // ClientSecret is currently only required if token_info endpoint also + // needs to be called with the generated GCP access token. When provided, STS will be + // called with additional basic authentication using ClientId as username and ClientSecret as password. Optional. + ClientSecret string + // ClientID is only required in conjunction with ClientSecret, as described above. Optional. + ClientID string + // CredentialSource contains the necessary information to retrieve the token itself, as well + // as some environmental information. One of SubjectTokenSupplier, AWSSecurityCredentialSupplier or + // CredentialSource must be provided. Optional. + CredentialSource *CredentialSource + // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries + // will set the x-goog-user-project header which overrides the project associated with the credentials. Optional. + QuotaProjectID string + // Scopes contains the desired scopes for the returned access token. Optional. + Scopes []string + // WorkforcePoolUserProject is the workforce pool user project number when the credential + // corresponds to a workforce pool and not a workload identity pool. + // The underlying principal must still have serviceusage.services.use IAM + // permission to use the project for billing/quota. Optional. + WorkforcePoolUserProject string + // SubjectTokenSupplier is an optional token supplier for OIDC/SAML credentials. + // One of SubjectTokenSupplier, AWSSecurityCredentialSupplier or CredentialSource must be provided. Optional. + SubjectTokenSupplier SubjectTokenSupplier + // AwsSecurityCredentialsSupplier is an AWS Security Credential supplier for AWS credentials. + // One of SubjectTokenSupplier, AWSSecurityCredentialSupplier or CredentialSource must be provided. Optional. 
+ AwsSecurityCredentialsSupplier AwsSecurityCredentialsSupplier + // UniverseDomain is the default service domain for a given Cloud universe. + // This value will be used in the default STS token URL. The default value + // is "googleapis.com". It will not be used if TokenURL is set. Optional. + UniverseDomain string +} + +var ( + validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) +) + +func validateWorkforceAudience(input string) bool { + return validWorkforceAudiencePattern.MatchString(input) +} + +// NewTokenSource Returns an external account TokenSource using the provided external account config. +func NewTokenSource(ctx context.Context, conf Config) (oauth2.TokenSource, error) { + if conf.Audience == "" { + return nil, fmt.Errorf("oauth2/google/externalaccount: Audience must be set") + } + if conf.SubjectTokenType == "" { + return nil, fmt.Errorf("oauth2/google/externalaccount: Subject token type must be set") + } + if conf.WorkforcePoolUserProject != "" { + valid := validateWorkforceAudience(conf.Audience) + if !valid { + return nil, fmt.Errorf("oauth2/google/externalaccount: Workforce pool user project should not be set for non-workforce pool credentials") + } + } + count := 0 + if conf.CredentialSource != nil { + count++ + } + if conf.SubjectTokenSupplier != nil { + count++ + } + if conf.AwsSecurityCredentialsSupplier != nil { + count++ + } + if count == 0 { + return nil, fmt.Errorf("oauth2/google/externalaccount: One of CredentialSource, SubjectTokenSupplier, or AwsSecurityCredentialsSupplier must be set") + } + if count > 1 { + return nil, fmt.Errorf("oauth2/google/externalaccount: Only one of CredentialSource, SubjectTokenSupplier, or AwsSecurityCredentialsSupplier must be set") + } + return conf.tokenSource(ctx, "https") +} + +// tokenSource is a private function that's directly called by some of the tests, +// because the unit test URLs are mocked, and would otherwise fail the +// validity check. +func (c *Config) tokenSource(ctx context.Context, scheme string) (oauth2.TokenSource, error) { + + ts := tokenSource{ + ctx: ctx, + conf: c, + } + if c.ServiceAccountImpersonationURL == "" { + return oauth2.ReuseTokenSource(nil, ts), nil + } + scopes := c.Scopes + ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} + imp := impersonate.ImpersonateTokenSource{ + Ctx: ctx, + URL: c.ServiceAccountImpersonationURL, + Scopes: scopes, + Ts: oauth2.ReuseTokenSource(nil, ts), + TokenLifetimeSeconds: c.ServiceAccountImpersonationLifetimeSeconds, + } + return oauth2.ReuseTokenSource(nil, imp), nil +} + +// Subject token file types. +const ( + fileTypeText = "text" + fileTypeJSON = "json" +) + +// Format contains information needed to retireve a subject token for URL or File sourced credentials. +type Format struct { + // Type should be either "text" or "json". This determines whether the file or URL sourced credentials + // expect a simple text subject token or if the subject token will be contained in a JSON object. + // When not provided "text" type is assumed. + Type string `json:"type"` + // SubjectTokenFieldName is only required for JSON format. This is the field name that the credentials will check + // for the subject token in the file or URL response. This would be "access_token" for azure. + SubjectTokenFieldName string `json:"subject_token_field_name"` +} + +// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. 
+type CredentialSource struct { + // File is the location for file sourced credentials. + // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + File string `json:"file"` + + // Url is the URL to call for URL sourced credentials. + // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + URL string `json:"url"` + // Headers are the headers to attach to the request for URL sourced credentials. + Headers map[string]string `json:"headers"` + + // Executable is the configuration object for executable sourced credentials. + // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + Executable *ExecutableConfig `json:"executable"` + + // EnvironmentID is the EnvironmentID used for AWS sourced credentials. This should start with "AWS". + // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + EnvironmentID string `json:"environment_id"` + // RegionURL is the metadata URL to retrieve the region from for EC2 AWS credentials. + RegionURL string `json:"region_url"` + // RegionalCredVerificationURL is the AWS regional credential verification URL, will default to + // "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" if not provided." + RegionalCredVerificationURL string `json:"regional_cred_verification_url"` + // IMDSv2SessionTokenURL is the URL to retrieve the session token when using IMDSv2 in AWS. + IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` + // Format is the format type for the subject token. Used for File and URL sourced credentials. Expected values are "text" or "json". + Format Format `json:"format"` +} + +// ExecutableConfig contains information needed for executable sourced credentials. +type ExecutableConfig struct { + // Command is the the full command to run to retrieve the subject token. + // This can include arguments. Must be an absolute path for the program. Required. + Command string `json:"command"` + // TimeoutMillis is the timeout duration, in milliseconds. Defaults to 30000 milliseconds when not provided. Optional. + TimeoutMillis *int `json:"timeout_millis"` + // OutputFile is the absolute path to the output file where the executable will cache the response. + // If specified the auth libraries will first check this location before running the executable. Optional. + OutputFile string `json:"output_file"` +} + +// SubjectTokenSupplier can be used to supply a subject token to exchange for a GCP access token. +type SubjectTokenSupplier interface { + // SubjectToken should return a valid subject token or an error. + // The external account token source does not cache the returned subject token, so caching + // logic should be implemented in the supplier to prevent multiple requests for the same subject token. + SubjectToken(ctx context.Context, options SupplierOptions) (string, error) +} + +// AWSSecurityCredentialsSupplier can be used to supply AwsSecurityCredentials and an AWS Region to +// exchange for a GCP access token. +type AwsSecurityCredentialsSupplier interface { + // AwsRegion should return the AWS region or an error. + AwsRegion(ctx context.Context, options SupplierOptions) (string, error) + // GetAwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error. 
+ // The external account token source does not cache the returned security credentials, so caching + // logic should be implemented in the supplier to prevent multiple requests for the same security credentials. + AwsSecurityCredentials(ctx context.Context, options SupplierOptions) (*AwsSecurityCredentials, error) +} + +// SupplierOptions contains information about the requested subject token or AWS security credentials from the +// Google external account credential. +type SupplierOptions struct { + // Audience is the requested audience for the external account credential. + Audience string + // Subject token type is the requested subject token type for the external account credential. Expected values include: + // “urn:ietf:params:oauth:token-type:jwt” + // “urn:ietf:params:oauth:token-type:id-token” + // “urn:ietf:params:oauth:token-type:saml2” + // “urn:ietf:params:aws:token-type:aws4_request” + SubjectTokenType string +} + +// tokenURL returns the default STS token endpoint with the configured universe +// domain. +func (c *Config) tokenURL() string { + if c.UniverseDomain == "" { + return strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1) + } + return strings.Replace(defaultTokenURL, universeDomainPlaceholder, c.UniverseDomain, 1) +} + +// parse determines the type of CredentialSource needed. +func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { + //set Defaults + if c.TokenURL == "" { + c.TokenURL = c.tokenURL() + } + supplierOptions := SupplierOptions{Audience: c.Audience, SubjectTokenType: c.SubjectTokenType} + + if c.AwsSecurityCredentialsSupplier != nil { + awsCredSource := awsCredentialSource{ + awsSecurityCredentialsSupplier: c.AwsSecurityCredentialsSupplier, + targetResource: c.Audience, + supplierOptions: supplierOptions, + ctx: ctx, + } + return awsCredSource, nil + } else if c.SubjectTokenSupplier != nil { + return programmaticRefreshCredentialSource{subjectTokenSupplier: c.SubjectTokenSupplier, supplierOptions: supplierOptions, ctx: ctx}, nil + } else if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" { + if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil { + if awsVersion != 1 { + return nil, fmt.Errorf("oauth2/google/externalaccount: aws version '%d' is not supported in the current build", awsVersion) + } + + awsCredSource := awsCredentialSource{ + environmentID: c.CredentialSource.EnvironmentID, + regionURL: c.CredentialSource.RegionURL, + regionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL, + credVerificationURL: c.CredentialSource.URL, + targetResource: c.Audience, + ctx: ctx, + } + if c.CredentialSource.IMDSv2SessionTokenURL != "" { + awsCredSource.imdsv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL + } + + return awsCredSource, nil + } + } else if c.CredentialSource.File != "" { + return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil + } else if c.CredentialSource.URL != "" { + return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil + } else if c.CredentialSource.Executable != nil { + return createExecutableCredential(ctx, c.CredentialSource.Executable, c) + } + return nil, fmt.Errorf("oauth2/google/externalaccount: unable to parse credential source") +} + +type baseCredentialSource interface { + credentialSourceType() string + subjectToken() (string, error) +} + 
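// Editor's illustration (not part of the vendored upstream file): a minimal sketch of the
// two ways of wiring the public API defined above — a file-sourced CredentialSource and a
// custom SubjectTokenSupplier. The audience, pool/provider names, token file path and
// environment variable are placeholder assumptions.
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"golang.org/x/oauth2/google/externalaccount"
)

// envTokenSupplier returns an OIDC token read from an environment variable;
// it implements externalaccount.SubjectTokenSupplier.
type envTokenSupplier struct{ envVar string }

func (s envTokenSupplier) SubjectToken(ctx context.Context, opts externalaccount.SupplierOptions) (string, error) {
	if tok := os.Getenv(s.envVar); tok != "" {
		return tok, nil
	}
	return "", fmt.Errorf("no subject token in %s", s.envVar)
}

func main() {
	ctx := context.Background()

	// File-sourced workload identity federation: the token file is refreshed
	// out of band by another process, as described in the package comment.
	fileConf := externalaccount.Config{
		Audience:         "//iam.googleapis.com/projects/123456/locations/global/workloadIdentityPools/my-pool/providers/my-provider",
		SubjectTokenType: "urn:ietf:params:oauth:token-type:jwt",
		CredentialSource: &externalaccount.CredentialSource{
			File:   "/var/run/secrets/oidc/token",
			Format: externalaccount.Format{Type: "text"},
		},
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	}
	fileTS, err := externalaccount.NewTokenSource(ctx, fileConf)
	if err != nil {
		log.Fatal(err)
	}
	_ = fileTS

	// Supplier-based configuration: the subject token is produced by user code.
	supplierConf := externalaccount.Config{
		Audience:             "//iam.googleapis.com/projects/123456/locations/global/workloadIdentityPools/my-pool/providers/my-provider",
		SubjectTokenType:     "urn:ietf:params:oauth:token-type:jwt",
		SubjectTokenSupplier: envTokenSupplier{envVar: "OIDC_TOKEN"},
		Scopes:               []string{"https://www.googleapis.com/auth/cloud-platform"},
	}
	supplierTS, err := externalaccount.NewTokenSource(ctx, supplierConf)
	if err != nil {
		log.Fatal(err)
	}
	_ = supplierTS
}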
+// tokenSource is the source that handles external credentials. It is used to retrieve Tokens. +type tokenSource struct { + ctx context.Context + conf *Config +} + +func getMetricsHeaderValue(conf *Config, credSource baseCredentialSource) string { + return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", + goVersion(), + "unknown", + credSource.credentialSourceType(), + conf.ServiceAccountImpersonationURL != "", + conf.ServiceAccountImpersonationLifetimeSeconds != 0) +} + +// Token allows tokenSource to conform to the oauth2.TokenSource interface. +func (ts tokenSource) Token() (*oauth2.Token, error) { + conf := ts.conf + + credSource, err := conf.parse(ts.ctx) + if err != nil { + return nil, err + } + subjectToken, err := credSource.subjectToken() + + if err != nil { + return nil, err + } + stsRequest := stsexchange.TokenExchangeRequest{ + GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", + Audience: conf.Audience, + Scope: conf.Scopes, + RequestedTokenType: "urn:ietf:params:oauth:token-type:access_token", + SubjectToken: subjectToken, + SubjectTokenType: conf.SubjectTokenType, + } + header := make(http.Header) + header.Add("Content-Type", "application/x-www-form-urlencoded") + header.Add("x-goog-api-client", getMetricsHeaderValue(conf, credSource)) + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: oauth2.AuthStyleInHeader, + ClientID: conf.ClientID, + ClientSecret: conf.ClientSecret, + } + var options map[string]interface{} + // Do not pass workforce_pool_user_project when client authentication is used. + // The client ID is sufficient for determining the user project. + if conf.WorkforcePoolUserProject != "" && conf.ClientID == "" { + options = map[string]interface{}{ + "userProject": conf.WorkforcePoolUserProject, + } + } + stsResp, err := stsexchange.ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) + if err != nil { + return nil, err + } + + accessToken := &oauth2.Token{ + AccessToken: stsResp.AccessToken, + TokenType: stsResp.TokenType, + } + + // The RFC8693 doesn't define the explicit 0 of "expires_in" field behavior. + if stsResp.ExpiresIn <= 0 { + return nil, fmt.Errorf("oauth2/google/externalaccount: got invalid expiry from security token service") + } + accessToken.Expiry = now().Add(time.Duration(stsResp.ExpiresIn) * time.Second) + + if stsResp.RefreshToken != "" { + accessToken.RefreshToken = stsResp.RefreshToken + } + return accessToken, nil +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go new file mode 100644 index 000000000..dca5681a4 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go @@ -0,0 +1,313 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "regexp" + "strings" + "time" +) + +var serviceAccountImpersonationRE = regexp.MustCompile("https://iamcredentials\\..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken") + +const ( + executableSupportedMaxVersion = 1 + defaultTimeout = 30 * time.Second + timeoutMinimum = 5 * time.Second + timeoutMaximum = 120 * time.Second + executableSource = "response" + outputFileSource = "output file" +) + +type nonCacheableError struct { + message string +} + +func (nce nonCacheableError) Error() string { + return nce.message +} + +func missingFieldError(source, field string) error { + return fmt.Errorf("oauth2/google/externalaccount: %v missing `%q` field", source, field) +} + +func jsonParsingError(source, data string) error { + return fmt.Errorf("oauth2/google/externalaccount: unable to parse %v\nResponse: %v", source, data) +} + +func malformedFailureError() error { + return nonCacheableError{"oauth2/google/externalaccount: response must include `error` and `message` fields when unsuccessful"} +} + +func userDefinedError(code, message string) error { + return nonCacheableError{fmt.Sprintf("oauth2/google/externalaccount: response contains unsuccessful response: (%v) %v", code, message)} +} + +func unsupportedVersionError(source string, version int) error { + return fmt.Errorf("oauth2/google/externalaccount: %v contains unsupported version: %v", source, version) +} + +func tokenExpiredError() error { + return nonCacheableError{"oauth2/google/externalaccount: the token returned by the executable is expired"} +} + +func tokenTypeError(source string) error { + return fmt.Errorf("oauth2/google/externalaccount: %v contains unsupported token type", source) +} + +func exitCodeError(exitCode int) error { + return fmt.Errorf("oauth2/google/externalaccount: executable command failed with exit code %v", exitCode) +} + +func executableError(err error) error { + return fmt.Errorf("oauth2/google/externalaccount: executable command failed: %v", err) +} + +func executablesDisallowedError() error { + return errors.New("oauth2/google/externalaccount: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") +} + +func timeoutRangeError() error { + return errors.New("oauth2/google/externalaccount: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds") +} + +func commandMissingError() error { + return errors.New("oauth2/google/externalaccount: missing `command` field — executable command must be provided") +} + +type environment interface { + existingEnv() []string + getenv(string) string + run(ctx context.Context, command string, env []string) ([]byte, error) + now() time.Time +} + +type runtimeEnvironment struct{} + +func (r runtimeEnvironment) existingEnv() []string { + return os.Environ() +} + +func (r runtimeEnvironment) getenv(key string) string { + return os.Getenv(key) +} + +func (r runtimeEnvironment) now() time.Time { + return time.Now().UTC() +} + +func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) { + splitCommand := strings.Fields(command) + cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...) 
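// Editor's note (added for clarity, not in the upstream file): strings.Fields above splits
// the command on whitespace only, so shell quoting is not interpreted and arguments that
// contain spaces are not supported.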
+ cmd.Env = env + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + if ctx.Err() == context.DeadlineExceeded { + return nil, context.DeadlineExceeded + } + + if exitError, ok := err.(*exec.ExitError); ok { + return nil, exitCodeError(exitError.ExitCode()) + } + + return nil, executableError(err) + } + + bytesStdout := bytes.TrimSpace(stdout.Bytes()) + if len(bytesStdout) > 0 { + return bytesStdout, nil + } + return bytes.TrimSpace(stderr.Bytes()), nil +} + +type executableCredentialSource struct { + Command string + Timeout time.Duration + OutputFile string + ctx context.Context + config *Config + env environment +} + +// CreateExecutableCredential creates an executableCredentialSource given an ExecutableConfig. +// It also performs defaulting and type conversions. +func createExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) { + if ec.Command == "" { + return executableCredentialSource{}, commandMissingError() + } + + result := executableCredentialSource{} + result.Command = ec.Command + if ec.TimeoutMillis == nil { + result.Timeout = defaultTimeout + } else { + result.Timeout = time.Duration(*ec.TimeoutMillis) * time.Millisecond + if result.Timeout < timeoutMinimum || result.Timeout > timeoutMaximum { + return executableCredentialSource{}, timeoutRangeError() + } + } + result.OutputFile = ec.OutputFile + result.ctx = ctx + result.config = config + result.env = runtimeEnvironment{} + return result, nil +} + +type executableResponse struct { + Version int `json:"version,omitempty"` + Success *bool `json:"success,omitempty"` + TokenType string `json:"token_type,omitempty"` + ExpirationTime int64 `json:"expiration_time,omitempty"` + IdToken string `json:"id_token,omitempty"` + SamlResponse string `json:"saml_response,omitempty"` + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (cs executableCredentialSource) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) { + var result executableResponse + if err := json.Unmarshal(response, &result); err != nil { + return "", jsonParsingError(source, string(response)) + } + + if result.Version == 0 { + return "", missingFieldError(source, "version") + } + + if result.Success == nil { + return "", missingFieldError(source, "success") + } + + if !*result.Success { + if result.Code == "" || result.Message == "" { + return "", malformedFailureError() + } + return "", userDefinedError(result.Code, result.Message) + } + + if result.Version > executableSupportedMaxVersion || result.Version < 0 { + return "", unsupportedVersionError(source, result.Version) + } + + if result.ExpirationTime == 0 && cs.OutputFile != "" { + return "", missingFieldError(source, "expiration_time") + } + + if result.TokenType == "" { + return "", missingFieldError(source, "token_type") + } + + if result.ExpirationTime != 0 && result.ExpirationTime < now { + return "", tokenExpiredError() + } + + if result.TokenType == "urn:ietf:params:oauth:token-type:jwt" || result.TokenType == "urn:ietf:params:oauth:token-type:id_token" { + if result.IdToken == "" { + return "", missingFieldError(source, "id_token") + } + return result.IdToken, nil + } + + if result.TokenType == "urn:ietf:params:oauth:token-type:saml2" { + if result.SamlResponse == "" { + return "", missingFieldError(source, "saml_response") + } + return result.SamlResponse, nil + } + + return "", tokenTypeError(source) 
+} + +func (cs executableCredentialSource) credentialSourceType() string { + return "executable" +} + +func (cs executableCredentialSource) subjectToken() (string, error) { + if token, err := cs.getTokenFromOutputFile(); token != "" || err != nil { + return token, err + } + + return cs.getTokenFromExecutableCommand() +} + +func (cs executableCredentialSource) getTokenFromOutputFile() (token string, err error) { + if cs.OutputFile == "" { + // This ExecutableCredentialSource doesn't use an OutputFile. + return "", nil + } + + file, err := os.Open(cs.OutputFile) + if err != nil { + // No OutputFile found. Hasn't been created yet, so skip it. + return "", nil + } + defer file.Close() + + data, err := ioutil.ReadAll(io.LimitReader(file, 1<<20)) + if err != nil || len(data) == 0 { + // Cachefile exists, but no data found. Get new credential. + return "", nil + } + + token, err = cs.parseSubjectTokenFromSource(data, outputFileSource, cs.env.now().Unix()) + if err != nil { + if _, ok := err.(nonCacheableError); ok { + // If the cached token is expired we need a new token, + // and if the cache contains a failure, we need to try again. + return "", nil + } + + // There was an error in the cached token, and the developer should be aware of it. + return "", err + } + // Token parsing succeeded. Use found token. + return token, nil +} + +func (cs executableCredentialSource) executableEnvironment() []string { + result := cs.env.existingEnv() + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", cs.config.Audience)) + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", cs.config.SubjectTokenType)) + result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0") + if cs.config.ServiceAccountImpersonationURL != "" { + matches := serviceAccountImpersonationRE.FindStringSubmatch(cs.config.ServiceAccountImpersonationURL) + if matches != nil { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1])) + } + } + if cs.OutputFile != "" { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", cs.OutputFile)) + } + return result +} + +func (cs executableCredentialSource) getTokenFromExecutableCommand() (string, error) { + // For security reasons, we need our consumers to set this environment variable to allow executables to be run. + if cs.env.getenv("GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES") != "1" { + return "", executablesDisallowedError() + } + + ctx, cancel := context.WithDeadline(cs.ctx, cs.env.now().Add(cs.Timeout)) + defer cancel() + + output, err := cs.env.run(ctx, cs.Command, cs.executableEnvironment()) + if err != nil { + return "", err + } + return cs.parseSubjectTokenFromSource(output, executableSource, cs.env.now().Unix()) +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go new file mode 100644 index 000000000..33766b972 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go @@ -0,0 +1,61 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package externalaccount + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" +) + +type fileCredentialSource struct { + File string + Format Format +} + +func (cs fileCredentialSource) credentialSourceType() string { + return "file" +} + +func (cs fileCredentialSource) subjectToken() (string, error) { + tokenFile, err := os.Open(cs.File) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: failed to open credential file %q", cs.File) + } + defer tokenFile.Close() + tokenBytes, err := ioutil.ReadAll(io.LimitReader(tokenFile, 1<<20)) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: failed to read credential file: %v", err) + } + tokenBytes = bytes.TrimSpace(tokenBytes) + switch cs.Format.Type { + case "json": + jsonData := make(map[string]interface{}) + err = json.Unmarshal(tokenBytes, &jsonData) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: failed to unmarshal subject token file: %v", err) + } + val, ok := jsonData[cs.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("oauth2/google/externalaccount: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("oauth2/google/externalaccount: improperly formatted subject token") + } + return token, nil + case "text": + return string(tokenBytes), nil + case "": + return string(tokenBytes), nil + default: + return "", errors.New("oauth2/google/externalaccount: invalid credential_source file format type") + } + +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/header.go b/vendor/golang.org/x/oauth2/google/externalaccount/header.go new file mode 100644 index 000000000..1d5aad2e2 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/header.go @@ -0,0 +1,64 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "runtime" + "strings" + "unicode" +) + +var ( + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. +func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. 
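// Editor's note (added for clarity, not in the upstream file): for example, runtime version
// "go1.21.5" is reported as "1.21.5", while "go1.22rc1" becomes "1.22.0-rc1".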
+ if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return "UNKNOWN" +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go new file mode 100644 index 000000000..6c1abdf2d --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go @@ -0,0 +1,21 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import "context" + +type programmaticRefreshCredentialSource struct { + supplierOptions SupplierOptions + subjectTokenSupplier SubjectTokenSupplier + ctx context.Context +} + +func (cs programmaticRefreshCredentialSource) credentialSourceType() string { + return "programmatic" +} + +func (cs programmaticRefreshCredentialSource) subjectToken() (string, error) { + return cs.subjectTokenSupplier.SubjectToken(cs.ctx, cs.supplierOptions) +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go new file mode 100644 index 000000000..71a7184e0 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go @@ -0,0 +1,79 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + + "golang.org/x/oauth2" +) + +type urlCredentialSource struct { + URL string + Headers map[string]string + Format Format + ctx context.Context +} + +func (cs urlCredentialSource) credentialSourceType() string { + return "url" +} + +func (cs urlCredentialSource) subjectToken() (string, error) { + client := oauth2.NewClient(cs.ctx, nil) + req, err := http.NewRequest("GET", cs.URL, nil) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: HTTP request for URL-sourced credential failed: %v", err) + } + req = req.WithContext(cs.ctx) + + for key, val := range cs.Headers { + req.Header.Add(key, val) + } + resp, err := client.Do(req) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: invalid response when retrieving subject token: %v", err) + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: invalid body in subject token URL query: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return "", fmt.Errorf("oauth2/google/externalaccount: status code %d: %s", c, respBody) + } + + switch cs.Format.Type { + case "json": + jsonData := make(map[string]interface{}) + err = json.Unmarshal(respBody, &jsonData) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: failed to unmarshal subject token file: %v", err) + } + val, ok := jsonData[cs.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("oauth2/google/externalaccount: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("oauth2/google/externalaccount: improperly formatted subject token") + } + return token, nil + case "text": + return string(respBody), nil + case "": + return string(respBody), nil + 
default: + return "", errors.New("oauth2/google/externalaccount: invalid credential_source file format type") + } + +} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go new file mode 100644 index 000000000..ba931c2c3 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -0,0 +1,309 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google/externalaccount" + "golang.org/x/oauth2/google/internal/externalaccountauthorizeduser" + "golang.org/x/oauth2/google/internal/impersonate" + "golang.org/x/oauth2/jwt" +) + +// Endpoint is Google's OAuth 2.0 default endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://oauth2.googleapis.com/token", + DeviceAuthURL: "https://oauth2.googleapis.com/device/code", + AuthStyle: oauth2.AuthStyleInParams, +} + +// MTLSTokenURL is Google's OAuth 2.0 default mTLS endpoint. +const MTLSTokenURL = "https://oauth2.mtls.googleapis.com/token" + +// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. +const JWTTokenURL = "https://oauth2.googleapis.com/token" + +// ConfigFromJSON uses a Google Developers Console client_credentials.json +// file to construct a config. +// client_credentials.json can be downloaded from +// https://console.developers.google.com, under "Credentials". Download the Web +// application credentials in the JSON format and provide the contents of the +// file as jsonKey. +func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + +// JWTConfigFromJSON uses a Google Developers service account JSON key file to read +// the credentials that authorize and authenticate the requests. +// Create a service account on "Credentials" for your project at +// https://console.developers.google.com to download a JSON key file. +func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var f credentialsFile + if err := json.Unmarshal(jsonKey, &f); err != nil { + return nil, err + } + if f.Type != serviceAccountKey { + return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) + } + scope = append([]string(nil), scope...) // copy + return f.jwtConfig(scope, ""), nil +} + +// JSON key file types. 
+const ( + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" + externalAccountKey = "external_account" + externalAccountAuthorizedUserKey = "external_account_authorized_user" + impersonatedServiceAccount = "impersonated_service_account" +) + +// credentialsFile is the unmarshalled representation of a credentials file. +type credentialsFile struct { + Type string `json:"type"` + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + AuthURL string `json:"auth_uri"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + UniverseDomain string `json:"universe_domain"` + + // User Credential fields + // (These typically come from gcloud auth.) + ClientSecret string `json:"client_secret"` + ClientID string `json:"client_id"` + RefreshToken string `json:"refresh_token"` + + // External Account fields + Audience string `json:"audience"` + SubjectTokenType string `json:"subject_token_type"` + TokenURLExternal string `json:"token_url"` + TokenInfoURL string `json:"token_info_url"` + ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + ServiceAccountImpersonation serviceAccountImpersonationInfo `json:"service_account_impersonation"` + Delegates []string `json:"delegates"` + CredentialSource externalaccount.CredentialSource `json:"credential_source"` + QuotaProjectID string `json:"quota_project_id"` + WorkforcePoolUserProject string `json:"workforce_pool_user_project"` + + // External Account Authorized User fields + RevokeURL string `json:"revoke_url"` + + // Service account impersonation + SourceCredentials *credentialsFile `json:"source_credentials"` +} + +type serviceAccountImpersonationInfo struct { + TokenLifetimeSeconds int `json:"token_lifetime_seconds"` +} + +func (f *credentialsFile) jwtConfig(scopes []string, subject string) *jwt.Config { + cfg := &jwt.Config{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: scopes, + TokenURL: f.TokenURL, + Subject: subject, // This is the user email to impersonate + Audience: f.Audience, + } + if cfg.TokenURL == "" { + cfg.TokenURL = JWTTokenURL + } + return cfg +} + +func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsParams) (oauth2.TokenSource, error) { + switch f.Type { + case serviceAccountKey: + cfg := f.jwtConfig(params.Scopes, params.Subject) + return cfg.TokenSource(ctx), nil + case userCredentialsKey: + cfg := &oauth2.Config{ + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: params.Scopes, + Endpoint: oauth2.Endpoint{ + AuthURL: f.AuthURL, + TokenURL: f.TokenURL, + AuthStyle: oauth2.AuthStyleInParams, + }, + } + if cfg.Endpoint.AuthURL == "" { + cfg.Endpoint.AuthURL = Endpoint.AuthURL + } + if cfg.Endpoint.TokenURL == "" { + if params.TokenURL != "" { + cfg.Endpoint.TokenURL = params.TokenURL + } else { + cfg.Endpoint.TokenURL = Endpoint.TokenURL + } + } + tok := &oauth2.Token{RefreshToken: f.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case externalAccountKey: + cfg := &externalaccount.Config{ + Audience: f.Audience, + SubjectTokenType: f.SubjectTokenType, + TokenURL: f.TokenURLExternal, + TokenInfoURL: f.TokenInfoURL, + ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL, + ServiceAccountImpersonationLifetimeSeconds: f.ServiceAccountImpersonation.TokenLifetimeSeconds, + ClientSecret: f.ClientSecret, + ClientID: f.ClientID, + CredentialSource: 
&f.CredentialSource, + QuotaProjectID: f.QuotaProjectID, + Scopes: params.Scopes, + WorkforcePoolUserProject: f.WorkforcePoolUserProject, + } + return externalaccount.NewTokenSource(ctx, *cfg) + case externalAccountAuthorizedUserKey: + cfg := &externalaccountauthorizeduser.Config{ + Audience: f.Audience, + RefreshToken: f.RefreshToken, + TokenURL: f.TokenURLExternal, + TokenInfoURL: f.TokenInfoURL, + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + RevokeURL: f.RevokeURL, + QuotaProjectID: f.QuotaProjectID, + Scopes: params.Scopes, + } + return cfg.TokenSource(ctx) + case impersonatedServiceAccount: + if f.ServiceAccountImpersonationURL == "" || f.SourceCredentials == nil { + return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials") + } + + ts, err := f.SourceCredentials.tokenSource(ctx, params) + if err != nil { + return nil, err + } + imp := impersonate.ImpersonateTokenSource{ + Ctx: ctx, + URL: f.ServiceAccountImpersonationURL, + Scopes: params.Scopes, + Ts: ts, + Delegates: f.Delegates, + } + return oauth2.ReuseTokenSource(nil, imp), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", f.Type) + } +} + +// ComputeTokenSource returns a token source that fetches access tokens +// from Google Compute Engine (GCE)'s metadata server. It's only valid to use +// this token source if your program is running on a GCE instance. +// If no account is specified, "default" is used. +// If no scopes are specified, a set of default scopes are automatically granted. +// Further information about retrieving access tokens from the GCE metadata +// server can be found at https://cloud.google.com/compute/docs/authentication. +func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { + return computeTokenSource(account, 0, scope...) +} + +func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource { + return oauth2.ReuseTokenSourceWithExpiry(nil, computeSource{account: account, scopes: scope}, earlyExpiry) +} + +type computeSource struct { + account string + scopes []string +} + +func (cs computeSource) Token() (*oauth2.Token, error) { + if !metadata.OnGCE() { + return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") + } + acct := cs.account + if acct == "" { + acct = "default" + } + tokenURI := "instance/service-accounts/" + acct + "/token" + if len(cs.scopes) > 0 { + v := url.Values{} + v.Set("scopes", strings.Join(cs.scopes, ",")) + tokenURI = tokenURI + "?" + v.Encode() + } + tokenJSON, err := metadata.Get(tokenURI) + if err != nil { + return nil, err + } + var res struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` + } + err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") + } + tok := &oauth2.Token{ + AccessToken: res.AccessToken, + TokenType: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + } + // NOTE(cbro): add hidden metadata about where the token is from. + // This is needed for detection by client libraries to know that credentials come from the metadata server. 
+ // This may be removed in a future version of this library. + return tok.WithExtra(map[string]interface{}{ + "oauth2.google.tokenSource": "compute-metadata", + "oauth2.google.serviceAccount": acct, + }), nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go new file mode 100644 index 000000000..cb5820707 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go @@ -0,0 +1,114 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccountauthorizeduser + +import ( + "context" + "errors" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/stsexchange" +) + +// now aliases time.Now for testing. +var now = func() time.Time { + return time.Now().UTC() +} + +var tokenValid = func(token oauth2.Token) bool { + return token.Valid() +} + +type Config struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workforce pool and + // the provider identifier in that pool. + Audience string + // RefreshToken is the optional OAuth 2.0 refresh token. If specified, credentials can be refreshed. + RefreshToken string + // TokenURL is the optional STS token exchange endpoint for refresh. Must be specified for refresh, can be left as + // None if the token can not be refreshed. + TokenURL string + // TokenInfoURL is the optional STS endpoint URL for token introspection. + TokenInfoURL string + // ClientID is only required in conjunction with ClientSecret, as described above. + ClientID string + // ClientSecret is currently only required if token_info endpoint also needs to be called with the generated GCP + // access token. When provided, STS will be called with additional basic authentication using client_id as username + // and client_secret as password. + ClientSecret string + // Token is the OAuth2.0 access token. Can be nil if refresh information is provided. + Token string + // Expiry is the optional expiration datetime of the OAuth 2.0 access token. + Expiry time.Time + // RevokeURL is the optional STS endpoint URL for revoking tokens. + RevokeURL string + // QuotaProjectID is the optional project ID used for quota and billing. This project may be different from the + // project used to create the credentials. 
+ QuotaProjectID string + Scopes []string +} + +func (c *Config) canRefresh() bool { + return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != "" +} + +func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { + var token oauth2.Token + if c.Token != "" && !c.Expiry.IsZero() { + token = oauth2.Token{ + AccessToken: c.Token, + Expiry: c.Expiry, + TokenType: "Bearer", + } + } + if !tokenValid(token) && !c.canRefresh() { + return nil, errors.New("oauth2/google: Token should be created with fields to make it valid (`token` and `expiry`), or fields to allow it to refresh (`refresh_token`, `token_url`, `client_id`, `client_secret`).") + } + + ts := tokenSource{ + ctx: ctx, + conf: c, + } + + return oauth2.ReuseTokenSource(&token, ts), nil +} + +type tokenSource struct { + ctx context.Context + conf *Config +} + +func (ts tokenSource) Token() (*oauth2.Token, error) { + conf := ts.conf + if !conf.canRefresh() { + return nil, errors.New("oauth2/google: The credentials do not contain the necessary fields need to refresh the access token. You must specify refresh_token, token_url, client_id, and client_secret.") + } + + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: oauth2.AuthStyleInHeader, + ClientID: conf.ClientID, + ClientSecret: conf.ClientSecret, + } + + stsResponse, err := stsexchange.RefreshAccessToken(ts.ctx, conf.TokenURL, conf.RefreshToken, clientAuth, nil) + if err != nil { + return nil, err + } + if stsResponse.ExpiresIn < 0 { + return nil, errors.New("oauth2/google: got invalid expiry from security token service") + } + + if stsResponse.RefreshToken != "" { + conf.RefreshToken = stsResponse.RefreshToken + } + + token := &oauth2.Token{ + AccessToken: stsResponse.AccessToken, + Expiry: now().Add(time.Duration(stsResponse.ExpiresIn) * time.Second), + TokenType: "Bearer", + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go b/vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go new file mode 100644 index 000000000..6bc3af110 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go @@ -0,0 +1,105 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "golang.org/x/oauth2" +) + +// generateAccesstokenReq is used for service account impersonation +type generateAccessTokenReq struct { + Delegates []string `json:"delegates,omitempty"` + Lifetime string `json:"lifetime,omitempty"` + Scope []string `json:"scope,omitempty"` +} + +type impersonateTokenResponse struct { + AccessToken string `json:"accessToken"` + ExpireTime string `json:"expireTime"` +} + +// ImpersonateTokenSource uses a source credential, stored in Ts, to request an access token to the provided URL. +// Scopes can be defined when the access token is requested. +type ImpersonateTokenSource struct { + // Ctx is the execution context of the impersonation process + // used to perform http call to the URL. Required + Ctx context.Context + // Ts is the source credential used to generate a token on the + // impersonated service account. Required. + Ts oauth2.TokenSource + + // URL is the endpoint to call to generate a token + // on behalf the service account. Required. 
+ URL string + // Scopes that the impersonated credential should have. Required. + Scopes []string + // Delegates are the service account email addresses in a delegation chain. + // Each service account must be granted roles/iam.serviceAccountTokenCreator + // on the next service account in the chain. Optional. + Delegates []string + // TokenLifetimeSeconds is the number of seconds the impersonation token will + // be valid for. + TokenLifetimeSeconds int +} + +// Token performs the exchange to get a temporary service account token to allow access to GCP. +func (its ImpersonateTokenSource) Token() (*oauth2.Token, error) { + lifetimeString := "3600s" + if its.TokenLifetimeSeconds != 0 { + lifetimeString = fmt.Sprintf("%ds", its.TokenLifetimeSeconds) + } + reqBody := generateAccessTokenReq{ + Lifetime: lifetimeString, + Scope: its.Scopes, + Delegates: its.Delegates, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to marshal request: %v", err) + } + client := oauth2.NewClient(its.Ctx, its.Ts) + req, err := http.NewRequest("POST", its.URL, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to create impersonation request: %v", err) + } + req = req.WithContext(its.Ctx) + req.Header.Set("Content-Type", "application/json") + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to generate access token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to read body: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) + } + + var accessTokenResp impersonateTokenResponse + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("oauth2/google: unable to parse response: %v", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to parse expiry: %v", err) + } + return &oauth2.Token{ + AccessToken: accessTokenResp.AccessToken, + Expiry: expiry, + TokenType: "Bearer", + }, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go new file mode 100644 index 000000000..ebd520eac --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go @@ -0,0 +1,45 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stsexchange + +import ( + "encoding/base64" + "net/http" + "net/url" + + "golang.org/x/oauth2" +) + +// ClientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. +type ClientAuthentication struct { + // AuthStyle can be either basic or request-body + AuthStyle oauth2.AuthStyle + ClientID string + ClientSecret string +} + +// InjectAuthentication is used to add authentication to a Secure Token Service exchange +// request. It modifies either the passed url.Values or http.Header depending on the desired +// authentication format. 
+func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { + if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { + return + } + + switch c.AuthStyle { + case oauth2.AuthStyleInHeader: // AuthStyleInHeader corresponds to basic authentication as defined in rfc7617#2 + plainHeader := c.ClientID + ":" + c.ClientSecret + headers.Add("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(plainHeader))) + case oauth2.AuthStyleInParams: // AuthStyleInParams corresponds to request-body authentication with ClientID and ClientSecret in the message body. + values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + case oauth2.AuthStyleAutoDetect: + values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + default: + values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + } +} diff --git a/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go new file mode 100644 index 000000000..1a0bebd15 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go @@ -0,0 +1,125 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stsexchange + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + + "golang.org/x/oauth2" +) + +func defaultHeader() http.Header { + header := make(http.Header) + header.Add("Content-Type", "application/x-www-form-urlencoded") + return header +} + +// ExchangeToken performs an oauth2 token exchange with the provided endpoint. +// The first 4 fields are all mandatory. headers can be used to pass additional +// headers beyond the bare minimum required by the token exchange. options can +// be used to pass additional JSON-structured options to the remote server. 
+func ExchangeToken(ctx context.Context, endpoint string, request *TokenExchangeRequest, authentication ClientAuthentication, headers http.Header, options map[string]interface{}) (*Response, error) { + data := url.Values{} + data.Set("audience", request.Audience) + data.Set("grant_type", "urn:ietf:params:oauth:grant-type:token-exchange") + data.Set("requested_token_type", "urn:ietf:params:oauth:token-type:access_token") + data.Set("subject_token_type", request.SubjectTokenType) + data.Set("subject_token", request.SubjectToken) + data.Set("scope", strings.Join(request.Scope, " ")) + if options != nil { + opts, err := json.Marshal(options) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to marshal additional options: %v", err) + } + data.Set("options", string(opts)) + } + + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func RefreshAccessToken(ctx context.Context, endpoint string, refreshToken string, authentication ClientAuthentication, headers http.Header) (*Response, error) { + data := url.Values{} + data.Set("grant_type", "refresh_token") + data.Set("refresh_token", refreshToken) + + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func makeRequest(ctx context.Context, endpoint string, data url.Values, authentication ClientAuthentication, headers http.Header) (*Response, error) { + if headers == nil { + headers = defaultHeader() + } + client := oauth2.NewClient(ctx, nil) + authentication.InjectAuthentication(data, headers) + encodedData := data.Encode() + + req, err := http.NewRequest("POST", endpoint, strings.NewReader(encodedData)) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to properly build http request: %v", err) + } + req = req.WithContext(ctx) + for key, list := range headers { + for _, val := range list { + req.Header.Add(key, val) + } + } + req.Header.Add("Content-Length", strconv.Itoa(len(encodedData))) + + resp, err := client.Do(req) + + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid response from Secure Token Server: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, err + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) + } + var stsResp Response + err = json.Unmarshal(body, &stsResp) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to unmarshal response body from Secure Token Server: %v", err) + + } + + return &stsResp, nil +} + +// TokenExchangeRequest contains fields necessary to make an oauth2 token exchange. +type TokenExchangeRequest struct { + ActingParty struct { + ActorToken string + ActorTokenType string + } + GrantType string + Resource string + Audience string + Scope []string + RequestedTokenType string + SubjectToken string + SubjectTokenType string +} + +// Response is used to decode the remote server response during an oauth2 token exchange. +type Response struct { + AccessToken string `json:"access_token"` + IssuedTokenType string `json:"issued_token_type"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + Scope string `json:"scope"` + RefreshToken string `json:"refresh_token"` +} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 000000000..e89e6ae17 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,102 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. +func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + return newJWTSource(jsonKey, audience, nil) +} + +// JWTAccessTokenSourceWithScope uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The scope is typically a list of URLs that specifies the scope of the +// credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. +func JWTAccessTokenSourceWithScope(jsonKey []byte, scope ...string) (oauth2.TokenSource, error) { + return newJWTSource(jsonKey, "", scope) +} + +func newJWTSource(jsonKey []byte, audience string, scopes []string) (oauth2.TokenSource, error) { + if len(scopes) == 0 && audience == "" { + return nil, fmt.Errorf("google: missing scope/audience for JWT access token") + } + + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + scopes: scopes, + pk: pk, + pkID: cfg.PrivateKeyID, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + rts := newErrWrappingTokenSource(oauth2.ReuseTokenSource(tok, ts)) + return rts, nil +} + +type jwtAccessTokenSource struct { + email, audience string + scopes []string + pk *rsa.PrivateKey + pkID string +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + scope := strings.Join(ts.scopes, " ") + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Scope: scope, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + KeyID: string(ts.pkID), + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 000000000..456224bc7 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,201 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/oauth2" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. +func NewSDKConfig(account string) (*SDKConfig, error) { + configPath, err := sdkConfigPath() + if err != nil { + return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) + } + credentialsPath := filepath.Join(configPath, "credentials") + f, err := os.Open(credentialsPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) + } + defer f.Close() + + var c sdkCredentials + if err := json.NewDecoder(f).Decode(&c); err != nil { + return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) + } + if len(c.Data) == 0 { + return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) + } + if account == "" { + propertiesPath := filepath.Join(configPath, "properties") + f, err := os.Open(propertiesPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) + } + defer f.Close() + ini, err := parseINI(f) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) + } + core, ok := ini["core"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) + } + active, ok := core["account"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) + } + account = active + } + + for _, d := range c.Data { + if account == "" || d.Key.Account == account { + if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { + return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) + } + var expiry time.Time + if d.Credential.TokenExpiry != nil { + expiry = *d.Credential.TokenExpiry + } + return &SDKConfig{ + conf: oauth2.Config{ + ClientID: d.Credential.ClientID, + ClientSecret: d.Credential.ClientSecret, + Scopes: strings.Split(d.Key.Scope, " "), + Endpoint: Endpoint, + RedirectURL: "oob", + }, + initialToken: &oauth2.Token{ + AccessToken: d.Credential.AccessToken, + RefreshToken: d.Credential.RefreshToken, + Expiry: expiry, + }, + }, nil + } + } + return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) +} + +// Client returns an HTTP 
client using Google Cloud SDK credentials to +// authorize requests. The token will auto-refresh as necessary. The +// underlying http.RoundTripper will be obtained using the provided +// context. The returned client and its Transport should not be +// modified. +func (c *SDKConfig) Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &oauth2.Transport{ + Source: c.TokenSource(ctx), + }, + } +} + +// TokenSource returns an oauth2.TokenSource that retrieve tokens from +// Google Cloud SDK credentials using the provided context. +// It will returns the current access token stored in the credentials, +// and refresh it when it expires, but it won't update the credentials +// with the new access token. +func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { + return c.conf.TokenSource(ctx, c.initialToken) +} + +// Scopes are the OAuth 2.0 scopes the current account is authorized for. +func (c *SDKConfig) Scopes() []string { + return c.conf.Scopes +} + +func parseINI(ini io.Reader) (map[string]map[string]string, error) { + result := map[string]map[string]string{ + "": {}, // root section + } + scanner := bufio.NewScanner(ini) + currentSection := "" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, ";") { + // comment. + continue + } + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + currentSection = strings.TrimSpace(line[1 : len(line)-1]) + result[currentSection] = map[string]string{} + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 && parts[0] != "" { + result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning ini: %v", err) + } + return result, nil +} + +// sdkConfigPath tries to guess where the gcloud config is located. +// It can be overridden during tests. +var sdkConfigPath = func() (string, error) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil + } + homeDir := guessUnixHomeDir() + if homeDir == "" { + return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") + } + return filepath.Join(homeDir, ".config", "gcloud"), nil +} + +func guessUnixHomeDir() string { + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + if v := os.Getenv("HOME"); v != "" { + return v + } + // Else, fall back to user.Current: + if u, err := user.Current(); err == nil { + return u.HomeDir + } + return "" +} diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go new file mode 100644 index 000000000..95015648b --- /dev/null +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -0,0 +1,182 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jws provides a partial implementation +// of JSON Web Signature encoding and decoding. +// It exists to support the golang.org/x/oauth2 package. +// +// See RFC 7515. +// +// Deprecated: this package is not intended for public use and might be +// removed in the future. It exists for internal use only. +// Please switch to another JWS package or copy this package into your own +// source tree. 
+package jws // import "golang.org/x/oauth2/jws" + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" +) + +// ClaimSet contains information about the JWT signature including the +// permissions being requested (scopes), the target of the token, the issuer, +// the time the token was issued, and the lifetime of the token. +type ClaimSet struct { + Iss string `json:"iss"` // email address of the client_id of the application making the access token request + Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests + Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). + Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) + Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) + Typ string `json:"typ,omitempty"` // token type (Optional). + + // Email for which the application is requesting delegated access (Optional). + Sub string `json:"sub,omitempty"` + + // The old name of Sub. Client keeps setting Prn to be + // complaint with legacy OAuth 2.0 providers. (Optional) + Prn string `json:"prn,omitempty"` + + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + // This array is marshalled using custom code (see (c *ClaimSet) encode()). + PrivateClaims map[string]interface{} `json:"-"` +} + +func (c *ClaimSet) encode() (string, error) { + // Reverting time back for machines whose time is not perfectly in sync. + // If client machine's time is in the future according + // to Google servers, an access token will not be issued. + now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) + } + + b, err := json.Marshal(c) + if err != nil { + return "", err + } + + if len(c.PrivateClaims) == 0 { + return base64.RawURLEncoding.EncodeToString(b), nil + } + + // Marshal private claim set and then append it to b. + prv, err := json.Marshal(c.PrivateClaims) + if err != nil { + return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) + } + + // Concatenate public and private claim JSON objects. + if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("jws: invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("jws: invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Header represents the header for the signed JWS payloads. +type Header struct { + // The algorithm used for signature. + Algorithm string `json:"alg"` + + // Represents the token type. + Typ string `json:"typ"` + + // The optional hint of which key is being used. + KeyID string `json:"kid,omitempty"` +} + +func (h *Header) encode() (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Decode decodes a claim set from a JWS payload. +func Decode(payload string) (*ClaimSet, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + // TODO(jbd): Provide more context about the error. 
+ return nil, errors.New("jws: invalid token received") + } + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) + if err != nil { + return nil, err + } + c := &ClaimSet{} + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) + return c, err +} + +// Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. +func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + cs, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, cs) + sig, err := sg([]byte(ss)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil +} + +// Encode encodes a signed JWS with provided header and claim set. +// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write(data) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) +} + +// Verify tests whether the provided JWT token's signature was produced by the private key +// associated with the supplied public key. +func Verify(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jws: invalid token received, token must have 3 parts") + } + + signedContent := parts[0] + "." + parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString) +} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go new file mode 100644 index 000000000..b2bf18298 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -0,0 +1,185 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly +// known as "two-legged OAuth 2.0". +// +// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 +package jwt + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} +) + +// Config is the configuration for using JWT to fetch tokens, +// commonly known as "two-legged OAuth 2.0". +type Config struct { + // Email is the OAuth client identifier used when communicating with + // the configured OAuth provider. + Email string + + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. The provided + // private key is used to sign JWT payloads. + // PEM containers with a passphrase are not supported. + // Use the following command to convert a PKCS 12 file into a PEM. 
+ // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + PrivateKey []byte + + // PrivateKeyID contains an optional hint indicating which key is being + // used. + PrivateKeyID string + + // Subject is the optional user to impersonate. + Subject string + + // Scopes optionally specifies a list of requested permission scopes. + Scopes []string + + // TokenURL is the endpoint required to complete the 2-legged JWT flow. + TokenURL string + + // Expires optionally specifies how long the token is valid for. + Expires time.Duration + + // Audience optionally specifies the intended audience of the + // request. If empty, the value of TokenURL is used as the + // intended audience. + Audience string + + // PrivateClaims optionally specifies custom private claims in the JWT. + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + PrivateClaims map[string]interface{} + + // UseIDToken optionally specifies whether ID token should be used instead + // of access token when the server returns both. + UseIDToken bool +} + +// TokenSource returns a JWT TokenSource using the configuration +// in c and the HTTP client from the provided context. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) +} + +// Client returns an HTTP client wrapping the context's +// HTTP transport and adding Authorization headers with tokens +// obtained from c. +// +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource. +type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + PrivateClaims: js.conf.PrivateClaims, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. Keep setting it + // to be compatible with legacy OAuth 2.0 providers. + claimSet.Prn = subject + } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + if aud := js.conf.Audience; aud != "" { + claimSet.Aud = aud + } + h := *defaultHeader + h.KeyID = js.conf.PrivateKeyID + payload, err := jws.Encode(&h, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, &oauth2.RetrieveError{ + Response: resp, + Body: body, + } + } + // tokenRes is the JSON response body. 
+ var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + if js.conf.UseIDToken { + if tokenRes.IDToken == "" { + return nil, fmt.Errorf("oauth2: response doesn't have JWT token") + } + token.AccessToken = tokenRes.IDToken + } + return token, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go new file mode 100644 index 000000000..d475cbc08 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -0,0 +1,244 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package oauth implements gRPC credentials using OAuth. +package oauth + +import ( + "context" + "fmt" + "net/url" + "os" + "sync" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/grpc/credentials" +) + +// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource. +type TokenSource struct { + oauth2.TokenSource +} + +// GetRequestMetadata gets the request metadata as a map from a TokenSource. +func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + token, err := ts.Token() + if err != nil { + return nil, err + } + ri, _ := credentials.RequestInfoFromContext(ctx) + if err = credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer TokenSource PerRPCCredentials: %v", err) + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +// RequireTransportSecurity indicates whether the credentials requires transport security. +func (ts TokenSource) RequireTransportSecurity() bool { + return true +} + +// removeServiceNameFromJWTURI removes RPC service name from URI. 
+func removeServiceNameFromJWTURI(uri string) (string, error) { + parsed, err := url.Parse(uri) + if err != nil { + return "", err + } + parsed.Path = "/" + return parsed.String(), nil +} + +type jwtAccess struct { + jsonKey []byte +} + +// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. +func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { + jsonKey, err := os.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewJWTAccessFromKey(jsonKey) +} + +// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey. +func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) { + return jwtAccess{jsonKey}, nil +} + +func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + // Remove RPC service name from URI that will be used as audience + // in a self-signed JWT token. It follows https://google.aip.dev/auth/4111. + aud, err := removeServiceNameFromJWTURI(uri[0]) + if err != nil { + return nil, err + } + // TODO: the returned TokenSource is reusable. Store it in a sync.Map, with + // uri as the key, to avoid recreating for every RPC. + ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, aud) + if err != nil { + return nil, err + } + token, err := ts.Token() + if err != nil { + return nil, err + } + ri, _ := credentials.RequestInfoFromContext(ctx) + if err = credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer jwtAccess PerRPCCredentials: %v", err) + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +func (j jwtAccess) RequireTransportSecurity() bool { + return true +} + +// oauthAccess supplies PerRPCCredentials from a given token. +type oauthAccess struct { + token oauth2.Token +} + +// NewOauthAccess constructs the PerRPCCredentials using a given token. +// +// Deprecated: use oauth.TokenSource instead. +func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { + return oauthAccess{token: *token} +} + +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + ri, _ := credentials.RequestInfoFromContext(ctx) + if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer oauthAccess PerRPCCredentials: %v", err) + } + return map[string]string{ + "authorization": oa.token.Type() + " " + oa.token.AccessToken, + }, nil +} + +func (oa oauthAccess) RequireTransportSecurity() bool { + return true +} + +// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from +// Google Compute Engine (GCE)'s metadata server. It is only valid to use this +// if your program is running on a GCE instance. +// TODO(dsymonds): Deprecate and remove this. +func NewComputeEngine() credentials.PerRPCCredentials { + return TokenSource{google.ComputeTokenSource("")} +} + +// serviceAccount represents PerRPCCredentials via JWT signing key. 
+type serviceAccount struct { + mu sync.Mutex + config *jwt.Config + t *oauth2.Token +} + +func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + s.mu.Lock() + defer s.mu.Unlock() + if !s.t.Valid() { + var err error + s.t, err = s.config.TokenSource(ctx).Token() + if err != nil { + return nil, err + } + } + ri, _ := credentials.RequestInfoFromContext(ctx) + if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer serviceAccount PerRPCCredentials: %v", err) + } + return map[string]string{ + "authorization": s.t.Type() + " " + s.t.AccessToken, + }, nil +} + +func (s *serviceAccount) RequireTransportSecurity() bool { + return true +} + +// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice +// from a Google Developers service account. +func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) { + config, err := google.JWTConfigFromJSON(jsonKey, scope...) + if err != nil { + return nil, err + } + return &serviceAccount{config: config}, nil +} + +// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file +// of a Google Developers service account. +func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { + jsonKey, err := os.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewServiceAccountFromKey(jsonKey, scope...) +} + +// NewApplicationDefault returns "Application Default Credentials". For more +// detail, see https://developers.google.com/accounts/docs/application-default-credentials. +func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) { + creds, err := google.FindDefaultCredentials(ctx, scope...) + if err != nil { + return nil, err + } + + // If JSON is nil, the authentication is provided by the environment and not + // with a credentials file, e.g. when code is running on Google Cloud + // Platform. Use the returned token source. + if creds.JSON == nil { + return TokenSource{creds.TokenSource}, nil + } + + // If auth is provided by env variable or creds file, the behavior will be + // different based on whether scope is set. Because the returned + // creds.TokenSource does oauth with jwt by default, and it requires scope. + // We can only use it if scope is not empty, otherwise it will fail with + // missing scope error. + // + // If scope is set, use it, it should just work. + // + // If scope is not set, we try to use jwt directly without oauth (this only + // works if it's a service account). + + if len(scope) != 0 { + return TokenSource{creds.TokenSource}, nil + } + + // Try to convert JSON to a jwt config without setting the optional scope + // parameter to check if it's a service account (the function errors if it's + // not). This is necessary because the returned config doesn't show the type + // of the account. + if _, err := google.JWTConfigFromJSON(creds.JSON); err != nil { + // If this fails, it's not a service account, return the original + // TokenSource from above. + return TokenSource{creds.TokenSource}, nil + } + + // If it's a service account, create a JWT only access with the key. 
+ return NewJWTAccessFromKey(creds.JSON) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 7da75ff38..6103bb677 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,3 +1,6 @@ +# cloud.google.com/go/compute/metadata v0.3.0 +## explicit; go 1.19 +cloud.google.com/go/compute/metadata # github.com/BurntSushi/toml v1.3.2 ## explicit; go 1.16 github.com/BurntSushi/toml @@ -613,7 +616,15 @@ golang.org/x/net/websocket # golang.org/x/oauth2 v0.20.0 ## explicit; go 1.18 golang.org/x/oauth2 +golang.org/x/oauth2/authhandler +golang.org/x/oauth2/google +golang.org/x/oauth2/google/externalaccount +golang.org/x/oauth2/google/internal/externalaccountauthorizeduser +golang.org/x/oauth2/google/internal/impersonate +golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal +golang.org/x/oauth2/jws +golang.org/x/oauth2/jwt # golang.org/x/sync v0.8.0 ## explicit; go 1.18 golang.org/x/sync/semaphore @@ -695,6 +706,7 @@ google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials google.golang.org/grpc/credentials/insecure +google.golang.org/grpc/credentials/oauth google.golang.org/grpc/encoding google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto @@ -1587,7 +1599,7 @@ open-cluster-management.io/api/utils/work/v1/workapplier open-cluster-management.io/api/utils/work/v1/workvalidator open-cluster-management.io/api/work/v1 open-cluster-management.io/api/work/v1alpha1 -# open-cluster-management.io/sdk-go v0.14.1-0.20240628095929-9ffb1b19e566 +# open-cluster-management.io/sdk-go v0.14.1-0.20240906071839-3e8465851efc ## explicit; go 1.22.0 open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1 open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1 @@ -1614,6 +1626,7 @@ open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/lister open-cluster-management.io/sdk-go/pkg/cloudevents/work/common +open-cluster-management.io/sdk-go/pkg/cloudevents/work/errors open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1/score.go b/vendor/open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1/score.go new file mode 100644 index 000000000..696d33074 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1/score.go @@ -0,0 +1,43 @@ +package v1alpha1 + +import ( + "k8s.io/klog/v2" +) + +// MaxScore is the upper bound of the normalized score. +const MaxScore = 100 + +// MinScore is the lower bound of the normalized score. +const MinScore = -100 + +// scoreNormalizer holds the minimum and maximum values for normalization, +// provides a normalize library to generate scores for AddOnPlacementScore. +type scoreNormalizer struct { + min float64 + max float64 +} + +// NewScoreNormalizer creates a new instance of scoreNormalizer with given min and max values. +func NewScoreNormalizer(min, max float64) *scoreNormalizer { + return &scoreNormalizer{ + min: min, + max: max, + } +} + +// Normalize normalizes a given value to the range -100 to 100 based on the min and max values. +func (s *scoreNormalizer) Normalize(value float64) (score int32, err error) { + if value > s.max { + // If the value exceeds the maximum, set score to MaxScore. 
+ score = MaxScore + // If the value is less than or equal to the minimum, set score to MinScore. + } else if value <= s.min { + score = MinScore + } else { + // Otherwise, normalize the value to the range -100 to 100. + score = (int32)((MaxScore-MinScore)*(value-s.min)/(s.max-s.min) + MinScore) + } + + klog.V(2).Infof("value = %v, min = %v, max = %v, score = %v", value, s.min, s.max, score) + return score, nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go index 86237d8c6..459b8af89 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "strconv" + "time" cloudevents "github.com/cloudevents/sdk-go/v2" @@ -74,27 +75,28 @@ func (c *CloudEventAgentClient[T]) ReconnectedChan() <-chan struct{} { // Resync the resources spec by sending a spec resync request from the current to the given source. func (c *CloudEventAgentClient[T]) Resync(ctx context.Context, source string) error { - // list the resource objects that are maintained by the current agent with the given source - objs, err := c.lister.List(types.ListOptions{Source: source, ClusterName: c.clusterName}) - if err != nil { - return err - } - - resources := &payload.ResourceVersionList{Versions: make([]payload.ResourceVersion, len(objs))} - for i, obj := range objs { - resourceVersion, err := strconv.ParseInt(obj.GetResourceVersion(), 10, 64) + // only resync the resources whose event data type is registered + for eventDataType := range c.codecs { + // list the resource objects that are maintained by the current agent with the given source + options := types.ListOptions{Source: source, ClusterName: c.clusterName, CloudEventsDataType: eventDataType} + objs, err := c.lister.List(options) if err != nil { return err } - resources.Versions[i] = payload.ResourceVersion{ - ResourceID: string(obj.GetUID()), - ResourceVersion: resourceVersion, + resources := &payload.ResourceVersionList{Versions: make([]payload.ResourceVersion, len(objs))} + for i, obj := range objs { + resourceVersion, err := strconv.ParseInt(obj.GetResourceVersion(), 10, 64) + if err != nil { + return err + } + + resources.Versions[i] = payload.ResourceVersion{ + ResourceID: string(obj.GetUID()), + ResourceVersion: resourceVersion, + } } - } - // only resync the resources whose event data type is registered - for eventDataType := range c.codecs { eventType := types.CloudEventsType{ CloudEventsDataType: eventDataType, SubResource: types.SubResourceSpec, @@ -150,8 +152,6 @@ func (c *CloudEventAgentClient[T]) Subscribe(ctx context.Context, handlers ...Re } func (c *CloudEventAgentClient[T]) receive(ctx context.Context, evt cloudevents.Event, handlers ...ResourceHandler[T]) { - klog.V(4).Infof("Received event:\n%s", evt) - eventType, err := types.ParseCloudEventsType(evt.Type()) if err != nil { klog.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) @@ -164,9 +164,11 @@ func (c *CloudEventAgentClient[T]) receive(ctx context.Context, evt cloudevents. 
return } + startTime := time.Now() if err := c.respondResyncStatusRequest(ctx, eventType.CloudEventsDataType, evt); err != nil { klog.Errorf("failed to resync manifestsstatus, %v", err) } + updateResourceStatusResyncDurationMetric(evt.Source(), c.clusterName, eventType.CloudEventsDataType.String(), startTime) return } @@ -188,7 +190,7 @@ func (c *CloudEventAgentClient[T]) receive(ctx context.Context, evt cloudevents. return } - action, err := c.specAction(evt.Source(), obj) + action, err := c.specAction(evt.Source(), eventType.CloudEventsDataType, obj) if err != nil { klog.Errorf("failed to generate spec action %s, %v", evt, err) return @@ -215,7 +217,8 @@ func (c *CloudEventAgentClient[T]) receive(ctx context.Context, evt cloudevents. func (c *CloudEventAgentClient[T]) respondResyncStatusRequest( ctx context.Context, eventDataType types.CloudEventsDataType, evt cloudevents.Event, ) error { - objs, err := c.lister.List(types.ListOptions{ClusterName: c.clusterName, Source: evt.Source()}) + options := types.ListOptions{ClusterName: c.clusterName, Source: evt.Source(), CloudEventsDataType: eventDataType} + objs, err := c.lister.List(options) if err != nil { return err } @@ -268,8 +271,10 @@ func (c *CloudEventAgentClient[T]) respondResyncStatusRequest( return nil } -func (c *CloudEventAgentClient[T]) specAction(source string, obj T) (evt types.ResourceAction, err error) { - objs, err := c.lister.List(types.ListOptions{ClusterName: c.clusterName, Source: source}) +func (c *CloudEventAgentClient[T]) specAction( + source string, eventDataType types.CloudEventsDataType, obj T) (evt types.ResourceAction, err error) { + options := types.ListOptions{ClusterName: c.clusterName, Source: source, CloudEventsDataType: eventDataType} + objs, err := c.lister.List(options) if err != nil { return evt, err } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/metrics_collector.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/metrics_collector.go new file mode 100644 index 000000000..bdb24cbd5 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/metrics_collector.go @@ -0,0 +1,138 @@ +package generic + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// Subsystem used to define the metrics: +const metricsSubsystem = "resources" + +// Names of the labels added to metrics: +const ( + metricsSourceLabel = "source" + metricsClusterLabel = "cluster" + metrucsDataTypeLabel = "type" +) + +// metricsLabels - Array of labels added to metrics: +var metricsLabels = []string{ + metricsSourceLabel, // source + metricsClusterLabel, // cluster + metrucsDataTypeLabel, // resource type +} + +// Names of the metrics: +const ( + specResyncDurationMetric = "spec_resync_duration_seconds" + statusResyncDurationMetric = "status_resync_duration_seconds" +) + +// The resource spec resync duration metric is a histogram with a base metric name of 'resource_spec_resync_duration_second' +// exposes multiple time series during a scrape: +// 1. cumulative counters for the observation buckets, exposed as 'resource_spec_resync_duration_seconds_bucket{le=""}' +// 2. the total sum of all observed values, exposed as 'resource_spec_resync_duration_seconds_sum' +// 3. 
the count of events that have been observed, exposed as 'resource_spec_resync_duration_seconds_count' (identical to 'resource_spec_resync_duration_seconds_bucket{le="+Inf"}' above) +// For example, 2 resource spec resync for manifests type that have been observed, one taking 0.5s and the other taking 0.7s, would result in the following metrics: +// resource_spec_resync_duration_seconds_bucket{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifests",le="0.1"} 0 +// resource_spec_resync_duration_seconds_bucket{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifests",le="0.2"} 0 +// resource_spec_resync_duration_seconds_bucket{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifests",le="0.5"} 1 +// resource_spec_resync_duration_seconds_bucket{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifests",le="1.0"} 2 +// resource_spec_resync_duration_seconds_bucket{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifests",le="2.0"} 2 +// resource_spec_resync_duration_seconds_bucket{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifests",le="10.0"} 2 +// resource_spec_resync_duration_seconds_bucket{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifests",le="30.0"} 2 +// resource_spec_resync_duration_seconds_bucket{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifests",le="+Inf"} 2 +// resource_spec_resync_duration_seconds_sum{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifests"} 1.2 +// resource_spec_resync_duration_seconds_count{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifests"} 2 +var resourceSpecResyncDurationMetric = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Subsystem: metricsSubsystem, + Name: specResyncDurationMetric, + Help: "The duration of the resource spec resync in seconds.", + Buckets: []float64{ + 0.1, + 0.2, + 0.5, + 1.0, + 2.0, + 10.0, + 30.0, + }, + }, + metricsLabels, +) + +// The resource status resync duration metric is a histogram with a base metric name of 'resource_status_resync_duration_second' +// exposes multiple time series during a scrape: +// 1. cumulative counters for the observation buckets, exposed as 'resource_status_resync_duration_seconds_bucket{le=""}' +// 2. the total sum of all observed values, exposed as 'resource_status_resync_duration_seconds_sum' +// 3. 
the count of events that have been observed, exposed as 'resource_status_resync_duration_seconds_count' (identical to 'resource_status_resync_duration_seconds_bucket{le="+Inf"}' above) +// For example, 2 resource status resync for manifestbundles type that have been observed, one taking 0.5s and the other taking 1.1s, would result in the following metrics: +// resource_status_resync_duration_seconds_bucket{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifestbundles",le="0.1"} 0 +// resource_status_resync_duration_seconds_bucket{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifestbundles",le="0.2"} 0 +// resource_status_resync_duration_seconds_bucket{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifestbundles",le="0.5"} 1 +// resource_status_resync_duration_seconds_bucket{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifestbundles",le="1.0"} 1 +// resource_status_resync_duration_seconds_bucket{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifestbundles",le="2.0"} 2 +// resource_status_resync_duration_seconds_bucket{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifestbundles",le="10.0"} 2 +// resource_status_resync_duration_seconds_bucket{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifestbundles",le="30.0"} 2 +// resource_status_resync_duration_seconds_bucket{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifestbundles",le="+Inf"} 2 +// resource_status_resync_duration_seconds_sum{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifestbundles"} 1.6 +// resource_status_resync_duration_seconds_count{source="source1",cluster="cluster1",type="io.open-cluster-management.works.v1alpha1.manifestbundles"} 2 +var resourceStatusResyncDurationMetric = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Subsystem: metricsSubsystem, + Name: statusResyncDurationMetric, + Help: "The duration of the resource status resync in seconds.", + Buckets: []float64{ + 0.1, + 0.2, + 0.5, + 1.0, + 2.0, + 10.0, + 30.0, + }, + }, + metricsLabels, +) + +// Register the metrics: +func RegisterResourceResyncMetrics() { + prometheus.MustRegister(resourceSpecResyncDurationMetric) + prometheus.MustRegister(resourceStatusResyncDurationMetric) +} + +// Unregister the metrics: +func UnregisterResourceResyncMetrics() { + prometheus.Unregister(resourceStatusResyncDurationMetric) + prometheus.Unregister(resourceStatusResyncDurationMetric) +} + +// ResetResourceResyncMetricsCollectors resets all collectors +func ResetResourceResyncMetricsCollectors() { + resourceSpecResyncDurationMetric.Reset() + resourceStatusResyncDurationMetric.Reset() +} + +// updateResourceSpecResyncDurationMetric updates the resource spec resync duration metric: +func updateResourceSpecResyncDurationMetric(source, cluster, dataType string, startTime time.Time) { + labels := prometheus.Labels{ + metricsSourceLabel: source, + metricsClusterLabel: cluster, + metrucsDataTypeLabel: dataType, + } + duration := time.Since(startTime) + resourceSpecResyncDurationMetric.With(labels).Observe(duration.Seconds()) +} + +// updateResourceStatusResyncDurationMetric updates the resource status resync duration metric: +func updateResourceStatusResyncDurationMetric(source, cluster, dataType string, startTime time.Time) { + 
labels := prometheus.Labels{ + metricsSourceLabel: source, + metricsClusterLabel: cluster, + metrucsDataTypeLabel: dataType, + } + duration := time.Since(startTime) + resourceStatusResyncDurationMetric.With(labels).Observe(duration.Seconds()) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/agentoptions.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/agentoptions.go index 7e94551bd..fad712f8a 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/agentoptions.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/agentoptions.go @@ -43,7 +43,8 @@ func (o *grpcAgentOptions) Protocol(ctx context.Context) (options.CloudEventsPro // TODO: Update this code to determine the subscription source for the agent client. // Currently, the grpc agent client is not utilized, and the 'Source' field serves // as a placeholder with all the sources. - Source: types.SourceAll, + Source: types.SourceAll, + ClusterName: o.clusterName, }), ) if err != nil { diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/options.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/options.go index 151618f9c..564ccf333 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/options.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/options.go @@ -8,10 +8,12 @@ import ( "os" "time" + "golang.org/x/oauth2" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/credentials/oauth" "gopkg.in/yaml.v2" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" @@ -24,6 +26,7 @@ type GRPCOptions struct { CAFile string ClientCertFile string ClientKeyFile string + TokenFile string } // GRPCConfig holds the information needed to build connect to gRPC server as a given user. @@ -36,6 +39,8 @@ type GRPCConfig struct { ClientCertFile string `json:"clientCertFile,omitempty" yaml:"clientCertFile,omitempty"` // ClientKeyFile is the file path to a client key file for TLS. ClientKeyFile string `json:"clientKeyFile,omitempty" yaml:"clientKeyFile,omitempty"` + // TokenFile is the file path to a token file for authentication. + TokenFile string `json:"tokenFile,omitempty" yaml:"tokenFile,omitempty"` } // BuildGRPCOptionsFromFlags builds configs from a config filepath. @@ -61,12 +66,16 @@ func BuildGRPCOptionsFromFlags(configPath string) (*GRPCOptions, error) { if config.ClientCertFile != "" && config.ClientKeyFile != "" && config.CAFile == "" { return nil, fmt.Errorf("setting clientCertFile and clientKeyFile requires caFile") } + if config.TokenFile != "" && config.CAFile == "" { + return nil, fmt.Errorf("setting tokenFile requires caFile") + } return &GRPCOptions{ URL: config.URL, CAFile: config.CAFile, ClientCertFile: config.ClientCertFile, ClientKeyFile: config.ClientKeyFile, + TokenFile: config.TokenFile, }, nil } @@ -90,19 +99,45 @@ func (o *GRPCOptions) GetGRPCClientConn() (*grpc.ClientConn, error) { return nil, fmt.Errorf("invalid CA %s", o.CAFile) } - clientCerts, err := tls.LoadX509KeyPair(o.ClientCertFile, o.ClientKeyFile) - if err != nil { - return nil, err + // Prepare gRPC dial options. + diaOpts := []grpc.DialOption{} + // Create a TLS configuration with CA pool and TLS 1.3. 
+ tlsConfig := &tls.Config{ + RootCAs: certPool, + MinVersion: tls.VersionTLS13, + MaxVersion: tls.VersionTLS13, } - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{clientCerts}, - RootCAs: certPool, - MinVersion: tls.VersionTLS13, - MaxVersion: tls.VersionTLS13, + // Check if client certificate and key files are provided for mutual TLS. + if len(o.ClientCertFile) != 0 && len(o.ClientKeyFile) != 0 { + // Load client certificate and key pair. + clientCerts, err := tls.LoadX509KeyPair(o.ClientCertFile, o.ClientKeyFile) + if err != nil { + return nil, err + } + // Add client certificates to the TLS configuration. + tlsConfig.Certificates = []tls.Certificate{clientCerts} + diaOpts = append(diaOpts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) + } else { + // token based authentication requires the configuration of transport credentials. + diaOpts = append(diaOpts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) + if len(o.TokenFile) != 0 { + // Use token-based authentication if token file is provided. + token, err := os.ReadFile(o.TokenFile) + if err != nil { + return nil, err + } + perRPCCred := oauth.TokenSource{ + TokenSource: oauth2.StaticTokenSource(&oauth2.Token{ + AccessToken: string(token), + })} + // Add per-RPC credentials to the dial options. + diaOpts = append(diaOpts, grpc.WithPerRPCCredentials(perRPCCred)) + } } - conn, err := grpc.Dial(o.URL, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) + // Establish a connection to the gRPC server. + conn, err := grpc.Dial(o.URL, diaOpts...) if err != nil { return nil, fmt.Errorf("failed to connect to grpc server %s, %v", o.URL, err) } @@ -110,6 +145,7 @@ func (o *GRPCOptions) GetGRPCClientConn() (*grpc.ClientConn, error) { return conn, nil } + // Insecure connection option; should not be used in production. conn, err := grpc.Dial(o.URL, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, fmt.Errorf("failed to connect to grpc server %s, %v", o.URL, err) diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.pb.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.pb.go index 5cc30a3cb..f895d8697 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.pb.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.pb.go @@ -394,8 +394,10 @@ type SubscriptionRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The original source of the respond CloudEvent(s). + // Optional. The original source of the respond CloudEvent(s). Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` + // Optional. The cluster name of the respond CloudEvent(s). 
+ ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` } func (x *SubscriptionRequest) Reset() { @@ -437,6 +439,13 @@ func (x *SubscriptionRequest) GetSource() string { return "" } +func (x *SubscriptionRequest) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + var File_cloudevent_proto protoreflect.FileDescriptor var file_cloudevent_proto_rawDesc = []byte{ @@ -496,27 +505,29 @@ var file_cloudevent_proto_rawDesc = []byte{ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x2d, 0x0a, 0x13, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x50, 0x0a, 0x13, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x32, 0xb3, 0x01, 0x0a, 0x11, - 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0x46, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x21, 0x2e, 0x69, - 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, - 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x09, 0x53, 0x75, 0x62, - 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x26, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, - 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, - 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x30, - 0x01, 0x42, 0x50, 0x5a, 0x4e, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x2d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x69, 0x6f, 0x2f, - 0x73, 0x64, 0x6b, 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x32, 0xb3, + 0x01, 0x0a, 0x11, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, + 0x21, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x50, 
0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x09, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x26, 0x2e, 0x69, 0x6f, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x22, 0x00, 0x30, 0x01, 0x42, 0x50, 0x5a, 0x4e, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x2d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x69, 0x6f, 0x2f, 0x73, 0x64, 0x6b, 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x2f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.proto b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.proto index 30cf9c9fd..c6a5ae77d 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.proto +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.proto @@ -71,8 +71,10 @@ message PublishRequest { } message SubscriptionRequest { - // Required. The original source of the respond CloudEvent(s). + // Optional. The original source of the respond CloudEvent(s). string source = 1; + // Optional. The cluster name of the respond CloudEvent(s). + string cluster_name = 2; } service CloudEventService { diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/option.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/option.go index 361b1b5de..f227e4825 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/option.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/option.go @@ -9,7 +9,8 @@ type Option func(*Protocol) error // SubscribeOption type SubscribeOption struct { - Source string + Source string + ClusterName string } // WithSubscribeOption sets the Subscribe configuration for the client. 
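The options.go hunk above switches GetGRPCClientConn from always requiring a client certificate pair to also accepting a token file over TLS, and BuildGRPCOptionsFromFlags now enforces that tokenFile is only honored together with caFile. A minimal sketch of how a caller is expected to exercise the new token path; the endpoint and secret paths are placeholders and the import alias is chosen only for readability.

package main

import (
	"log"

	grpcoptions "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc"
)

func main() {
	// With CAFile plus TokenFile (and no client cert/key), the connection is
	// built with TLS transport credentials and a static bearer token per RPC.
	opts := &grpcoptions.GRPCOptions{
		URL:       "grpc-broker.example.com:8443",   // placeholder endpoint
		CAFile:    "/var/run/secrets/broker/ca.crt", // placeholder CA bundle
		TokenFile: "/var/run/secrets/broker/token",  // placeholder token file
	}

	conn, err := opts.GetGRPCClientConn()
	if err != nil {
		log.Fatalf("failed to connect to the gRPC broker: %v", err)
	}
	defer conn.Close()
}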
diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/protocol.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/protocol.go index 467fc2455..ff9afadae 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/protocol.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/protocol.go @@ -94,8 +94,8 @@ func (p *Protocol) OpenInbound(ctx context.Context) error { return fmt.Errorf("the subscribe option must not be nil") } - if len(p.subscribeOption.Source) == 0 { - return fmt.Errorf("the subscribe option source must not be empty") + if len(p.subscribeOption.Source) == 0 && len(p.subscribeOption.ClusterName) == 0 { + return fmt.Errorf("the source and cluster name of subscribe option cannot both be empty") } p.openerMutex.Lock() @@ -103,13 +103,19 @@ func (p *Protocol) OpenInbound(ctx context.Context) error { logger := cecontext.LoggerFrom(ctx) subClient, err := p.client.Subscribe(ctx, &pbv1.SubscriptionRequest{ - Source: p.subscribeOption.Source, + Source: p.subscribeOption.Source, + ClusterName: p.subscribeOption.ClusterName, }) if err != nil { return err } - logger.Infof("subscribing events for: %v", p.subscribeOption.Source) + if p.subscribeOption.Source != "" { + logger.Infof("subscribing events for: %v", p.subscribeOption.Source) + } else { + logger.Infof("subscribing events for cluster: %v", p.subscribeOption.ClusterName) + } + go func() { for { msg, err := subClient.Recv() diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go index b27c1adf7..cd45043ea 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "strconv" + "time" cloudevents "github.com/cloudevents/sdk-go/v2" @@ -71,27 +72,28 @@ func (c *CloudEventSourceClient[T]) ReconnectedChan() <-chan struct{} { // Resync the resources status by sending a status resync request from the current source to a specified cluster. 
func (c *CloudEventSourceClient[T]) Resync(ctx context.Context, clusterName string) error { - // list the resource objects that are maintained by the current source with a specified cluster - objs, err := c.lister.List(types.ListOptions{Source: c.sourceID, ClusterName: clusterName}) - if err != nil { - return err - } - - hashes := &payload.ResourceStatusHashList{Hashes: make([]payload.ResourceStatusHash, len(objs))} - for i, obj := range objs { - statusHash, err := c.statusHashGetter(obj) + // only resync the resources whose event data type is registered + for eventDataType := range c.codecs { + // list the resource objects that are maintained by the current source with a specified cluster + options := types.ListOptions{Source: c.sourceID, ClusterName: clusterName, CloudEventsDataType: eventDataType} + objs, err := c.lister.List(options) if err != nil { return err } - hashes.Hashes[i] = payload.ResourceStatusHash{ - ResourceID: string(obj.GetUID()), - StatusHash: statusHash, + hashes := &payload.ResourceStatusHashList{Hashes: make([]payload.ResourceStatusHash, len(objs))} + for i, obj := range objs { + statusHash, err := c.statusHashGetter(obj) + if err != nil { + return err + } + + hashes.Hashes[i] = payload.ResourceStatusHash{ + ResourceID: string(obj.GetUID()), + StatusHash: statusHash, + } } - } - // only resync the resources whose event data type is registered - for eventDataType := range c.codecs { eventType := types.CloudEventsType{ CloudEventsDataType: eventDataType, SubResource: types.SubResourceStatus, @@ -144,8 +146,6 @@ func (c *CloudEventSourceClient[T]) Subscribe(ctx context.Context, handlers ...R } func (c *CloudEventSourceClient[T]) receive(ctx context.Context, evt cloudevents.Event, handlers ...ResourceHandler[T]) { - klog.V(4).Infof("Received event:\n%s", evt) - eventType, err := types.ParseCloudEventsType(evt.Type()) if err != nil { klog.Errorf("failed to parse cloud event type, %v", err) @@ -158,9 +158,17 @@ func (c *CloudEventSourceClient[T]) receive(ctx context.Context, evt cloudevents return } + clusterName, err := evt.Context.GetExtension(types.ExtensionClusterName) + if err != nil { + klog.Errorf("failed to get cluster name extension, %v", err) + return + } + + startTime := time.Now() if err := c.respondResyncSpecRequest(ctx, eventType.CloudEventsDataType, evt); err != nil { klog.Errorf("failed to resync resources spec, %v", err) } + updateResourceSpecResyncDurationMetric(c.sourceID, fmt.Sprintf("%s", clusterName), eventType.CloudEventsDataType.String(), startTime) return } @@ -216,7 +224,12 @@ func (c *CloudEventSourceClient[T]) respondResyncSpecRequest( return err } - objs, err := c.lister.List(types.ListOptions{ClusterName: fmt.Sprintf("%s", clusterName), Source: c.sourceID}) + options := types.ListOptions{ + ClusterName: fmt.Sprintf("%s", clusterName), + Source: c.sourceID, + CloudEventsDataType: evtDataType, + } + objs, err := c.lister.List(options) if err != nil { return err } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types/types.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types/types.go index 1c0ff503c..7a25c61dc 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types/types.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types/types.go @@ -135,6 +135,9 @@ type ListOptions struct { // Agent use the source ID to restrict the list of returned objects by their source ID. // Defaults to all sources. 
Source string + + // CloudEventsDataType indicates the resource related cloud events data type. + CloudEventsDataType CloudEventsDataType } // CloudEventsDataType uniquely identifies the type of cloud event data. diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go index f00b20641..17e7e3959 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go @@ -3,6 +3,8 @@ package client import ( "context" "fmt" + "net/http" + "strconv" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -17,6 +19,7 @@ import ( "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + workerrors "open-cluster-management.io/sdk-go/pkg/cloudevents/work/errors" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/store" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils" ) @@ -70,44 +73,54 @@ func (c *ManifestWorkAgentClient) DeleteCollection(ctx context.Context, opts met func (c *ManifestWorkAgentClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*workv1.ManifestWork, error) { klog.V(4).Infof("getting manifestwork %s/%s", c.namespace, name) - return c.watcherStore.Get(c.namespace, name) + work, exists, err := c.watcherStore.Get(c.namespace, name) + if err != nil { + return nil, errors.NewInternalError(err) + } + if !exists { + return nil, errors.NewNotFound(common.ManifestWorkGR, name) + } + + return work, nil } func (c *ManifestWorkAgentClient) List(ctx context.Context, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) { klog.V(4).Infof("list manifestworks from cluster %s", c.namespace) works, err := c.watcherStore.List(c.namespace, opts) if err != nil { - return nil, err + return nil, errors.NewInternalError(err) } - items := []workv1.ManifestWork{} - for _, work := range works { - items = append(items, *work) - } - - return &workv1.ManifestWorkList{Items: items}, nil + return works, nil } func (c *ManifestWorkAgentClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { klog.V(4).Infof("watch manifestworks from cluster %s", c.namespace) - return c.watcherStore.GetWatcher(c.namespace, opts) + watcher, err := c.watcherStore.GetWatcher(c.namespace, opts) + if err != nil { + return nil, errors.NewInternalError(err) + } + return watcher, nil } func (c *ManifestWorkAgentClient) Patch(ctx context.Context, name string, pt kubetypes.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *workv1.ManifestWork, err error) { klog.V(4).Infof("patching manifestwork %s/%s", c.namespace, name) - lastWork, err := c.watcherStore.Get(c.namespace, name) + lastWork, exists, err := c.watcherStore.Get(c.namespace, name) if err != nil { - return nil, err + return nil, errors.NewInternalError(err) + } + if !exists { + return nil, errors.NewNotFound(common.ManifestWorkGR, name) } patchedWork, err := utils.Patch(pt, lastWork, data) if err != nil { - return nil, err + return nil, errors.NewInternalError(err) } eventDataType, err := types.ParseCloudEventsDataType(patchedWork.Annotations[common.CloudEventsDataTypeAnnotationKey]) if err != nil { - return nil, err + return nil, errors.NewInternalError(err) } 
eventType := types.CloudEventsType{ @@ -119,18 +132,44 @@ func (c *ManifestWorkAgentClient) Patch(ctx context.Context, name string, pt kub statusUpdated, err := isStatusUpdate(subresources) if err != nil { - return nil, err + return nil, errors.NewGenericServerResponse(http.StatusMethodNotAllowed, "patch", common.ManifestWorkGR, name, err.Error(), 0, false) } if statusUpdated { eventType.Action = common.UpdateRequestAction + // publish the status update event to source, source will check the resource version + // and reject the update if it's status update is outdated. if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { - return nil, err + return nil, workerrors.NewPublishError(common.ManifestWorkGR, name, err) } + // Fetch the latest work from the store and verify the resource version to avoid updating the store + // with outdated work. Return a conflict error if the resource version is outdated. + // Due to the lack of read-modify-write guarantees in the store, race conditions may occur between + // this update operation and one from the agent informer after receiving the event from the source. + latestWork, exists, err := c.watcherStore.Get(c.namespace, name) + if err != nil { + return nil, errors.NewInternalError(err) + } + if !exists { + return nil, errors.NewNotFound(common.ManifestWorkGR, name) + } + lastResourceVersion, err := strconv.ParseInt(latestWork.GetResourceVersion(), 10, 64) + if err != nil { + return nil, errors.NewInternalError(err) + } + newResourceVersion, err := strconv.ParseInt(newWork.GetResourceVersion(), 10, 64) + if err != nil { + return nil, errors.NewInternalError(err) + } + // ensure the resource version of the work is not outdated + if newResourceVersion < lastResourceVersion { + // It's safe to return a conflict error here, even if the status update event + // has already been sent. The source may reject the update due to an outdated resource version. 
+ return nil, errors.NewConflict(common.ManifestWorkGR, name, fmt.Errorf("the resource version of the work is outdated")) + } if err := c.watcherStore.Update(newWork); err != nil { - return nil, err - + return nil, errors.NewInternalError(err) } return newWork, nil } @@ -147,18 +186,18 @@ func (c *ManifestWorkAgentClient) Patch(ctx context.Context, name string, pt kub eventType.Action = common.DeleteRequestAction if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { - return nil, err + return nil, workerrors.NewPublishError(common.ManifestWorkGR, name, err) } if err := c.watcherStore.Delete(newWork); err != nil { - return nil, err + return nil, errors.NewInternalError(err) } return newWork, nil } if err := c.watcherStore.Update(newWork); err != nil { - return nil, err + return nil, errors.NewInternalError(err) } return newWork, nil diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/lister/lister.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/lister/lister.go index 36a8ee9e7..0a965a4cf 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/lister/lister.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/lister/lister.go @@ -31,5 +31,20 @@ func (l *WatcherStoreLister) List(options types.ListOptions) ([]*workv1.Manifest opts.LabelSelector = fmt.Sprintf("%s=%s", common.CloudEventsOriginalSourceLabelKey, options.Source) } - return l.store.List(options.ClusterName, opts) + list, err := l.store.List(options.ClusterName, opts) + if err != nil { + return nil, err + } + + works := []*workv1.ManifestWork{} + for _, work := range list.Items { + cloudEventsDataType := work.Annotations[common.CloudEventsDataTypeAnnotationKey] + if cloudEventsDataType != options.CloudEventsDataType.String() { + continue + } + + works = append(works, &work) + } + + return works, nil } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/errors/errors.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/errors/errors.go new file mode 100644 index 000000000..0f594a851 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/errors/errors.go @@ -0,0 +1,36 @@ +package errors + +import ( + "fmt" + "net/http" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const StatusReasonPublishError metav1.StatusReason = "PublishError" + +// NewPublishError returns an error indicating the work could not be published, and the client can try again. +func NewPublishError(qualifiedResource schema.GroupResource, name string, err error) *errors.StatusError { + return &errors.StatusError{ + ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusInternalServerError, + Reason: StatusReasonPublishError, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + Causes: []metav1.StatusCause{{Message: err.Error()}}, + }, + Message: fmt.Sprintf("Failed to publish work %s: %v", name, err), + }, + } +} + +// IsPublishError determines if err is a publish error which indicates that the request can be retried +// by the client. 
+func IsPublishError(err error) bool { + return errors.ReasonForError(err) == StatusReasonPublishError +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go index c4d35442d..f0b56a31d 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go @@ -3,6 +3,7 @@ package client import ( "context" "fmt" + "net/http" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -17,6 +18,7 @@ import ( "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + workerrors "open-cluster-management.io/sdk-go/pkg/cloudevents/work/errors" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/store" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils" @@ -50,7 +52,7 @@ func (c *ManifestWorkSourceClient) SetNamespace(namespace string) { func (c *ManifestWorkSourceClient) Create(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.CreateOptions) (*workv1.ManifestWork, error) { if manifestWork.Namespace != "" && manifestWork.Namespace != c.namespace { - return nil, errors.NewInvalid(common.ManifestWorkGK, "namespace", field.ErrorList{ + return nil, errors.NewInvalid(common.ManifestWorkGK, manifestWork.Name, field.ErrorList{ field.Invalid( field.NewPath("metadata").Child("namespace"), manifestWork.Namespace, @@ -59,13 +61,12 @@ func (c *ManifestWorkSourceClient) Create(ctx context.Context, manifestWork *wor }) } - _, err := c.watcherStore.Get(c.namespace, manifestWork.Name) - if err == nil { - return nil, errors.NewAlreadyExists(common.ManifestWorkGR, manifestWork.Name) + _, exists, err := c.watcherStore.Get(c.namespace, manifestWork.Name) + if err != nil { + return nil, errors.NewInternalError(err) } - - if !errors.IsNotFound(err) { - return nil, err + if exists { + return nil, errors.NewAlreadyExists(common.ManifestWorkGR, manifestWork.Name) } // TODO if we support multiple data type in future, we may need to get the data type from @@ -81,17 +82,21 @@ func (c *ManifestWorkSourceClient) Create(ctx context.Context, manifestWork *wor newWork.Namespace = c.namespace newWork.ResourceVersion = getWorkResourceVersion(manifestWork) - if err := utils.Validate(newWork); err != nil { - return nil, err + if err := utils.Encode(newWork); err != nil { + return nil, errors.NewInternalError(err) + } + + if errs := utils.Validate(newWork); len(errs) != 0 { + return nil, errors.NewInvalid(common.ManifestWorkGK, manifestWork.Name, errs) } if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { - return nil, err + return nil, workerrors.NewPublishError(common.ManifestWorkGR, manifestWork.Name, err) } // add the new work to the local cache. 
if err := c.watcherStore.Add(newWork); err != nil { - return nil, err + return nil, errors.NewInternalError(err) } return newWork.DeepCopy(), nil } @@ -105,12 +110,12 @@ func (c *ManifestWorkSourceClient) UpdateStatus(ctx context.Context, manifestWor } func (c *ManifestWorkSourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - work, err := c.watcherStore.Get(c.namespace, name) - if errors.IsNotFound(err) { - return nil - } + work, exists, err := c.watcherStore.Get(c.namespace, name) if err != nil { - return err + return errors.NewInternalError(err) + } + if !exists { + return nil } // TODO if we support multiple data type in future, we may need to get the data type from @@ -126,7 +131,7 @@ func (c *ManifestWorkSourceClient) Delete(ctx context.Context, name string, opts deletingWork.DeletionTimestamp = &now if err := c.cloudEventsClient.Publish(ctx, eventType, deletingWork); err != nil { - return err + return workerrors.NewPublishError(common.ManifestWorkGR, name, err) } if len(work.Finalizers) == 0 { @@ -134,11 +139,17 @@ func (c *ManifestWorkSourceClient) Delete(ctx context.Context, name string, opts // 1) the agent does not start yet, we delete this work from the local cache directly. // 2) the agent is running, but the status response does not be handled by source yet, // after the deleted status is back, we need ignore this work in the ManifestWorkSourceHandler. - return c.watcherStore.Delete(deletingWork) + if err := c.watcherStore.Delete(deletingWork); err != nil { + return errors.NewInternalError(err) + } + return nil } // update the work with deletion timestamp in the local cache. - return c.watcherStore.Update(deletingWork) + if err := c.watcherStore.Update(deletingWork); err != nil { + return errors.NewInternalError(err) + } + return nil } func (c *ManifestWorkSourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { @@ -147,43 +158,54 @@ func (c *ManifestWorkSourceClient) DeleteCollection(ctx context.Context, opts me func (c *ManifestWorkSourceClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*workv1.ManifestWork, error) { klog.V(4).Infof("getting manifestwork %s", name) - return c.watcherStore.Get(c.namespace, name) + work, exists, err := c.watcherStore.Get(c.namespace, name) + if err != nil { + return nil, errors.NewInternalError(err) + } + if !exists { + return nil, errors.NewNotFound(common.ManifestWorkGR, name) + } + + return work, nil } func (c *ManifestWorkSourceClient) List(ctx context.Context, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) { klog.V(4).Infof("list manifestworks") works, err := c.watcherStore.List(c.namespace, opts) if err != nil { - return nil, err + return nil, errors.NewInternalError(err) } - items := []workv1.ManifestWork{} - for _, work := range works { - items = append(items, *work) - } - - return &workv1.ManifestWorkList{Items: items}, nil + return works, nil } func (c *ManifestWorkSourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.watcherStore.GetWatcher(c.namespace, opts) + watcher, err := c.watcherStore.GetWatcher(c.namespace, opts) + if err != nil { + return nil, errors.NewInternalError(err) + } + return watcher, nil } func (c *ManifestWorkSourceClient) Patch(ctx context.Context, name string, pt kubetypes.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *workv1.ManifestWork, err error) { klog.V(4).Infof("patching manifestwork %s", 
name) if len(subresources) != 0 { - return nil, fmt.Errorf("unsupported to update subresources %v", subresources) + msg := fmt.Sprintf("unsupported to update subresources %v", subresources) + return nil, errors.NewGenericServerResponse(http.StatusMethodNotAllowed, "patch", common.ManifestWorkGR, name, msg, 0, false) } - lastWork, err := c.watcherStore.Get(c.namespace, name) + lastWork, exists, err := c.watcherStore.Get(c.namespace, name) if err != nil { - return nil, err + return nil, errors.NewInternalError(err) + } + if !exists { + return nil, errors.NewNotFound(common.ManifestWorkGR, name) } patchedWork, err := utils.Patch(pt, lastWork, data) if err != nil { - return nil, err + return nil, errors.NewInternalError(err) } // TODO if we support multiple data type in future, we may need to get the data type from @@ -197,18 +219,19 @@ func (c *ManifestWorkSourceClient) Patch(ctx context.Context, name string, pt ku newWork := patchedWork.DeepCopy() newWork.ResourceVersion = getWorkResourceVersion(patchedWork) - if err := utils.Validate(newWork); err != nil { - return nil, err + if errs := utils.Validate(newWork); len(errs) != 0 { + return nil, errors.NewInvalid(common.ManifestWorkGK, name, errs) } if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { - return nil, err + return nil, workerrors.NewPublishError(common.ManifestWorkGR, name, err) } // modify the updated work in the local cache. if err := c.watcherStore.Update(newWork); err != nil { - return nil, err + return nil, errors.NewInternalError(err) } + return newWork.DeepCopy(), nil } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/lister/lister.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/lister/lister.go index cd83e7975..80e7f219c 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/lister/lister.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/lister/lister.go @@ -5,6 +5,7 @@ import ( workv1 "open-cluster-management.io/api/work/v1" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/store" ) @@ -21,5 +22,21 @@ func NewWatcherStoreLister(store store.WorkClientWatcherStore) *WatcherStoreList // List returns the ManifestWorks from the WorkClientWatcherCache with list options. 
func (l *WatcherStoreLister) List(options types.ListOptions) ([]*workv1.ManifestWork, error) { - return l.store.List(options.ClusterName, metav1.ListOptions{}) + list, err := l.store.List(options.ClusterName, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + works := []*workv1.ManifestWork{} + for _, work := range list.Items { + // Currently, the source client only support the ManifestBundle + // TODO: when supporting multiple cloud events data types, need a way + // to known the work event data type + if options.CloudEventsDataType != payload.ManifestBundleEventDataType { + continue + } + works = append(works, &work) + } + + return works, nil } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/base.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/base.go index d3a5b5bf9..173adbc45 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/base.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/base.go @@ -7,7 +7,6 @@ import ( "time" "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubetypes "k8s.io/apimachinery/pkg/types" @@ -34,33 +33,43 @@ type baseStore struct { } // List the works from the store with the list options -func (b *baseStore) List(namespace string, opts metav1.ListOptions) ([]*workv1.ManifestWork, error) { +func (b *baseStore) List(namespace string, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) { b.RLock() defer b.RUnlock() - return utils.ListWorksWithOptions(b.store, namespace, opts) + works, err := utils.ListWorksWithOptions(b.store, namespace, opts) + if err != nil { + return nil, err + } + + items := []workv1.ManifestWork{} + for _, work := range works { + items = append(items, *work) + } + + return &workv1.ManifestWorkList{Items: items}, nil } // Get a works from the store -func (b *baseStore) Get(namespace, name string) (*workv1.ManifestWork, error) { +func (b *baseStore) Get(namespace, name string) (*workv1.ManifestWork, bool, error) { b.RLock() defer b.RUnlock() obj, exists, err := b.store.GetByKey(fmt.Sprintf("%s/%s", namespace, name)) if err != nil { - return nil, err + return nil, false, err } if !exists { - return nil, errors.NewNotFound(common.ManifestWorkGR, name) + return nil, false, nil } work, ok := obj.(*workv1.ManifestWork) if !ok { - return nil, fmt.Errorf("unknown type %T", obj) + return nil, false, fmt.Errorf("unknown type %T", obj) } - return work, nil + return work, true, nil } // List all of works from the store @@ -162,7 +171,7 @@ func (b *workProcessor) handleWork(work *workv1.ManifestWork) error { // 1) the source is restarted and the local cache is not ready, requeue this work. // 2) (TODO) during the source restart, the work is deleted forcibly, we may need an // eviction mechanism for this. 
- return errors.NewNotFound(common.ManifestWorkGR, string(work.UID)) + return fmt.Errorf("the work %s does not exist", string(work.UID)) } updatedWork := lastWork.DeepCopy() diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/informer.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/informer.go index 12193bb4b..cc307ac73 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/informer.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/informer.go @@ -4,11 +4,11 @@ import ( "context" "fmt" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" workv1 "open-cluster-management.io/api/work/v1" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" @@ -112,10 +112,18 @@ func (s *AgentInformerWatcherStore) HandleReceivedWork(action types.ResourceActi case types.Added: return s.Add(work.DeepCopy()) case types.Modified: - lastWork, err := s.Get(work.Namespace, work.Name) + lastWork, exists, err := s.Get(work.Namespace, work.Name) if err != nil { return err } + if !exists { + return fmt.Errorf("the work %s/%s does not exist", work.Namespace, work.Name) + } + // prevent the work from being updated if it is deleting + if !lastWork.GetDeletionTimestamp().IsZero() { + klog.Warningf("the work %s/%s is deleting, ignore the update", work.Namespace, work.Name) + return nil + } updatedWork := work.DeepCopy() @@ -128,14 +136,13 @@ func (s *AgentInformerWatcherStore) HandleReceivedWork(action types.ResourceActi return s.Update(updatedWork) case types.Deleted: // the manifestwork is deleting on the source, we just update its deletion timestamp. - lastWork, err := s.Get(work.Namespace, work.Name) - if errors.IsNotFound(err) { - return nil - } - + lastWork, exists, err := s.Get(work.Namespace, work.Name) if err != nil { return err } + if !exists { + return nil + } updatedWork := lastWork.DeepCopy() updatedWork.DeletionTimestamp = work.DeletionTimestamp diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/interface.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/interface.go index 90f7c6f90..0b7f0c227 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/interface.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/interface.go @@ -39,13 +39,13 @@ type WorkClientWatcherStore interface { Delete(work *workv1.ManifestWork) error // List returns the works from store for a given namespace with list options - List(namespace string, opts metav1.ListOptions) ([]*workv1.ManifestWork, error) + List(namespace string, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) // ListAll list all of the works from store ListAll() ([]*workv1.ManifestWork, error) // Get returns a work from store with work namespace and name - Get(namespace, name string) (*workv1.ManifestWork, error) + Get(namespace, name string) (*workv1.ManifestWork, bool, error) // HasInitiated marks the store has been initiated, A resync may be required after the store is initiated // when building a work client. 
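The work client changes above stop returning raw transport errors and instead wrap publish failures in the new workerrors package, so callers can tell a retriable broker failure apart from a permanent API error. A rough sketch of how a source-side caller might use IsPublishError for that; applyWork is a hypothetical helper, and the clientset interface is assumed to be the standard work clientset from open-cluster-management.io/api.

package example

import (
	"context"
	"log"
	"time"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	workclientset "open-cluster-management.io/api/client/work/clientset/versioned"
	workv1 "open-cluster-management.io/api/work/v1"
	workerrors "open-cluster-management.io/sdk-go/pkg/cloudevents/work/errors"
)

// applyWork is a hypothetical helper: it creates a ManifestWork and retries
// only when the failure is a publish error, which the client now reports as retriable.
func applyWork(ctx context.Context, client workclientset.Interface, work *workv1.ManifestWork) error {
	for {
		_, err := client.WorkV1().ManifestWorks(work.Namespace).Create(ctx, work, metav1.CreateOptions{})
		switch {
		case err == nil || errors.IsAlreadyExists(err):
			return nil
		case workerrors.IsPublishError(err):
			// The event could not be published to the broker; back off and retry.
			log.Printf("publish failed for work %s, retrying: %v", work.Name, err)
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(time.Second):
			}
		default:
			// Conflicts, not-found, invalid, etc. are surfaced as standard API errors.
			return err
		}
	}
}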
diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/local.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/local.go index b56aa3ddf..5353f8ca2 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/local.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/store/local.go @@ -46,8 +46,8 @@ func NewSourceLocalWatcherStore(ctx context.Context, listFunc ListLocalWorksFunc // A local store to cache the works store := cache.NewStore(cache.MetaNamespaceKeyFunc) for _, work := range works { - if err := utils.Validate(work); err != nil { - return nil, err + if errs := utils.Validate(work); len(errs) != 0 { + return nil, fmt.Errorf(errs.ToAggregate().Error()) } if err := store.Add(work.DeepCopy()); err != nil { diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go index 0e3c1df01..36376339b 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go @@ -1,6 +1,7 @@ package utils import ( + "bytes" "encoding/json" "fmt" @@ -9,6 +10,7 @@ import ( "k8s.io/apimachinery/pkg/api/validation" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -115,7 +117,7 @@ func ListWorksWithOptions(store cache.Store, namespace string, opts metav1.ListO return works, nil } -func Validate(work *workv1.ManifestWork) error { +func Validate(work *workv1.ManifestWork) field.ErrorList { fldPath := field.NewPath("metadata") errs := field.ErrorList{} @@ -151,9 +153,26 @@ func Validate(work *workv1.ManifestWork) error { errs = append(errs, field.Invalid(field.NewPath("spec"), "spec", err.Error())) } - if len(errs) == 0 { - return nil + return errs +} + +// Encode ensures the given work's manifests are encoded +func Encode(work *workv1.ManifestWork) error { + for index, manifest := range work.Spec.Workload.Manifests { + if manifest.Raw == nil { + if manifest.Object == nil { + return fmt.Errorf("the Object and Raw of the manifest[%d] for the work (%s/%s) are both `nil`", + index, work.Namespace, work.Name) + } + + var buf bytes.Buffer + if err := unstructured.UnstructuredJSONScheme.Encode(manifest.Object, &buf); err != nil { + return err + } + + work.Spec.Workload.Manifests[index].Raw = buf.Bytes() + } } - return fmt.Errorf(errs.ToAggregate().Error()) + return nil }
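The utils.go change splits validation from encoding: Validate now returns a field.ErrorList, and the new Encode helper fills in manifest.Raw from manifest.Object before a work is published. A small sketch of the expected call order on the source side, using an unstructured ConfigMap as the manifest; the names and values are illustrative only.

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	workv1 "open-cluster-management.io/api/work/v1"
	"open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils"
)

func buildWork() (*workv1.ManifestWork, error) {
	// A manifest supplied as an Object only; Raw stays nil until Encode serializes it.
	cm := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "example", "namespace": "default"},
		"data":       map[string]interface{}{"key": "value"},
	}}

	work := &workv1.ManifestWork{
		ObjectMeta: metav1.ObjectMeta{Name: "example-work", Namespace: "cluster1"},
		Spec: workv1.ManifestWorkSpec{
			Workload: workv1.ManifestsTemplate{
				Manifests: []workv1.Manifest{{RawExtension: runtime.RawExtension{Object: cm}}},
			},
		},
	}

	// Encode populates Raw for every manifest that only carries an Object.
	if err := utils.Encode(work); err != nil {
		return nil, err
	}

	// Validate now returns a field.ErrorList instead of a plain error.
	if errs := utils.Validate(work); len(errs) != 0 {
		return nil, fmt.Errorf("invalid work: %v", errs.ToAggregate())
	}

	return work, nil
}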