From d68b42a527d38be098e49595d6c7533cccedd3b9 Mon Sep 17 00:00:00 2001 From: Guillermo Gaston Date: Tue, 11 Apr 2023 14:09:45 +0000 Subject: [PATCH] Use kubectl for kube-proxy upgrader calls Before the kube-proxy upgrader (https://github.com/aws/eks-anywhere/pull/5345) all API calls to the management cluster came either from kubectl or clusterctl, which both happened to be run in a docker container in the admin machine. This was the first piece of code that introduced the use of Kubernetes Go client directly from the CLI binary. This means that if a user was relying on this internal implementation (explicit interface vs implicit interface), their system could break if it wasn't set up to give the CLI network connectivity to the kind cluster. This PR "reverts" the addition of that new paradigm by changing the underlying client implementation to use kubectl commands. --- Makefile | 3 +- internal/test/kubernetes.go | 28 +- pkg/clients/kubernetes/client.go | 73 +++++ pkg/clients/kubernetes/client_test.go | 95 ++++++ pkg/clients/kubernetes/kubeconfig.go | 36 ++- pkg/clients/kubernetes/kubeconfig_test.go | 123 +++++++- pkg/clients/kubernetes/kubectl.go | 89 ++++++ pkg/clients/kubernetes/kubectl_test.go | 181 +++++++++++ pkg/clients/kubernetes/mocks/client.go | 295 ++++++++++++++++++ pkg/clients/kubernetes/mocks/kubeconfig.go | 45 --- pkg/clients/kubernetes/mocks/kubectl.go | 159 ++++++++-- pkg/clients/kubernetes/scheme.go | 3 + pkg/clients/kubernetes/unauth.go | 86 ++++- pkg/clients/kubernetes/unauth_test.go | 209 +++++++++++-- pkg/clustermanager/kube_proxy.go | 55 ++-- pkg/clustermanager/kube_proxy_test.go | 53 ++-- pkg/clustermanager/mocks/kube_proxy.go | 6 +- pkg/controller/clientutil/kubernetes.go | 36 +++ pkg/controller/clientutil/kubernetes_test.go | 146 +++++++++ pkg/controller/clusterapi.go | 15 +- pkg/dependencies/factory.go | 4 +- pkg/dependencies/factory_test.go | 1 + pkg/executables/kubectl.go | 232 +++++++++++--- pkg/executables/kubectl_getter_helper_test.go 
| 1 + pkg/executables/kubectl_test.go | 262 +++++++++++++++- pkg/providers/snow/mocks/client.go | 14 - pkg/providers/snow/snow.go | 8 +- pkg/providers/snow/snow_test.go | 18 +- test/e2e/metallb.go | 13 +- test/framework/vspherecsi.go | 21 +- 30 files changed, 2042 insertions(+), 268 deletions(-) create mode 100644 pkg/clients/kubernetes/client.go create mode 100644 pkg/clients/kubernetes/client_test.go create mode 100644 pkg/clients/kubernetes/kubectl.go create mode 100644 pkg/clients/kubernetes/kubectl_test.go create mode 100644 pkg/clients/kubernetes/mocks/client.go diff --git a/Makefile b/Makefile index 82a6699bbde12..67b1f7565c2ec 100644 --- a/Makefile +++ b/Makefile @@ -579,7 +579,8 @@ mocks: ## Generate mocks ${MOCKGEN} -destination=pkg/curatedpackages/mocks/packageinstaller.go -package=mocks -source "pkg/curatedpackages/packageinstaller.go" PackageController PackageHandler ${MOCKGEN} -destination=pkg/curatedpackages/mocks/reader.go -package=mocks -source "pkg/curatedpackages/bundle.go" Reader BundleRegistry ${MOCKGEN} -destination=pkg/curatedpackages/mocks/bundlemanager.go -package=mocks -source "pkg/curatedpackages/bundlemanager.go" Manager - ${MOCKGEN} -destination=pkg/clients/kubernetes/mocks/kubectl.go -package=mocks -source "pkg/clients/kubernetes/unauth.go" + ${MOCKGEN} -destination=pkg/clients/kubernetes/mocks/client.go -package=mocks -source "pkg/clients/kubernetes/client.go" + ${MOCKGEN} -destination=pkg/clients/kubernetes/mocks/kubectl.go -package=mocks -source "pkg/clients/kubernetes/kubectl.go" ${MOCKGEN} -destination=pkg/clients/kubernetes/mocks/kubeconfig.go -package=mocks -source "pkg/clients/kubernetes/kubeconfig.go" ${MOCKGEN} -destination=pkg/curatedpackages/mocks/installer.go -package=mocks -source "pkg/curatedpackages/packagecontrollerclient.go" ChartManager ClientBuilder ${MOCKGEN} -destination=pkg/curatedpackages/mocks/kube_client.go -package=mocks -mock_names Client=MockKubeClient sigs.k8s.io/controller-runtime/pkg/client Client diff 
--git a/internal/test/kubernetes.go b/internal/test/kubernetes.go index 0a062e49a0602..a1861cf0f25fe 100644 --- a/internal/test/kubernetes.go +++ b/internal/test/kubernetes.go @@ -1,41 +1,29 @@ package test import ( - "context" - "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" _ "github.com/aws/eks-anywhere/internal/test/envtest" "github.com/aws/eks-anywhere/pkg/clients/kubernetes" + "github.com/aws/eks-anywhere/pkg/controller/clientutil" ) -// KubeClient implements kubernetes.Client by using client.Client. -type KubeClient struct { - client client.Client -} - -func NewKubeClient(client client.Client) *KubeClient { - return &KubeClient{ - client: client, - } -} - -// Get retrieves an obj for the given name and namespace from the Kubernetes Cluster. -func (c *KubeClient) Get(ctx context.Context, name, namespace string, obj kubernetes.Object) error { - return c.client.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, obj) +// NewKubeClient builds a new kubernetes.Client by using client.Client. +func NewKubeClient(client client.Client) kubernetes.Client { + return clientutil.NewKubeClient(client) } -// NewFakeKubeClient returns a KubeClient that uses a fake client.Client under the hood. -func NewFakeKubeClient(objs ...client.Object) *KubeClient { +// NewFakeKubeClient returns a kubernetes.Client that uses a fake client.Client under the hood. +func NewFakeKubeClient(objs ...client.Object) kubernetes.Client { return NewKubeClient(fake.NewClientBuilder().WithObjects(objs...).Build()) } -// NewFakeKubeClientAlwaysError returns a KubeClient that will always fail in any operation +// NewFakeKubeClientAlwaysError returns a kubernetes.Client that will always fail in any operation // This is achieved by injecting an empty Scheme, which will make the underlying client.Client // incapable of determining the resource type for a particular client.Object. 
-func NewFakeKubeClientAlwaysError(objs ...client.Object) *KubeClient { +func NewFakeKubeClientAlwaysError(objs ...client.Object) kubernetes.Client { return NewKubeClient( fake.NewClientBuilder().WithScheme(runtime.NewScheme()).WithObjects(objs...).Build(), ) diff --git a/pkg/clients/kubernetes/client.go b/pkg/clients/kubernetes/client.go new file mode 100644 index 0000000000000..e4adcd255ad25 --- /dev/null +++ b/pkg/clients/kubernetes/client.go @@ -0,0 +1,73 @@ +package kubernetes + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Object is a Kubernetes object. +type Object client.Object + +// ObjectList is a Kubernetes object list. +type ObjectList client.ObjectList + +// Client is Kubernetes API client. +type Client interface { + Reader + Writer +} + +// Reader knows how to read and list Kubernetes objects. +type Reader interface { + // Get retrieves an obj for the given name and namespace from the Kubernetes Cluster. + Get(ctx context.Context, name, namespace string, obj Object) error + + // List retrieves list of objects. On a successful call, Items field + // in the list will be populated with the result returned from the server. + List(ctx context.Context, list ObjectList) error +} + +// Writer knows how to create, delete, and update Kubernetes objects. +type Writer interface { + // Create saves the object obj in the Kubernetes cluster. + Create(ctx context.Context, obj Object) error + + // Update updates the given obj in the Kubernetes cluster. + Update(ctx context.Context, obj Object) error + + // Delete deletes the given obj from Kubernetes cluster. + Delete(ctx context.Context, obj Object) error + + // DeleteAllOf deletes all objects of the given type matching the given options. + DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error +} + +// DeleteAllOfOption is some configuration that modifies options for a delete request. 
+type DeleteAllOfOption interface { + // ApplyToDeleteAllOf applies this configuration to the given deletecollection options. + ApplyToDeleteAllOf(*DeleteAllOfOptions) +} + +// DeleteAllOfOptions contains options for deletecollection (deleteallof) requests. +type DeleteAllOfOptions struct { + // HasLabels filters results by label and value. The requirement is an AND match + // for all labels. + HasLabels map[string]string + + // Namespace represents the namespace to list for, or empty for + // non-namespaced objects, or to list across all namespaces. + Namespace string +} + +var _ DeleteAllOfOption = &DeleteAllOfOptions{} + +// ApplyToDeleteAllOf implements DeleteAllOfOption. +func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) { + if o.HasLabels != nil { + do.HasLabels = o.HasLabels + } + if o.Namespace != "" { + do.Namespace = o.Namespace + } +} diff --git a/pkg/clients/kubernetes/client_test.go b/pkg/clients/kubernetes/client_test.go new file mode 100644 index 0000000000000..a56c5ee2c242e --- /dev/null +++ b/pkg/clients/kubernetes/client_test.go @@ -0,0 +1,95 @@ +package kubernetes_test + +import ( + "testing" + + . 
"github.com/onsi/gomega" + + "github.com/aws/eks-anywhere/pkg/clients/kubernetes" +) + +func TestDeleteAllOfOptionsApplyToDeleteAllOf(t *testing.T) { + tests := []struct { + name string + option, in, want *kubernetes.DeleteAllOfOptions + }{ + { + name: "empty", + option: &kubernetes.DeleteAllOfOptions{}, + in: &kubernetes.DeleteAllOfOptions{ + HasLabels: map[string]string{ + "label": "value", + }, + Namespace: "ns", + }, + want: &kubernetes.DeleteAllOfOptions{ + HasLabels: map[string]string{ + "label": "value", + }, + Namespace: "ns", + }, + }, + { + name: "only Namespace", + option: &kubernetes.DeleteAllOfOptions{ + Namespace: "other-ns", + }, + in: &kubernetes.DeleteAllOfOptions{ + HasLabels: map[string]string{ + "label": "value", + }, + Namespace: "ns", + }, + want: &kubernetes.DeleteAllOfOptions{ + HasLabels: map[string]string{ + "label": "value", + }, + Namespace: "other-ns", + }, + }, + { + name: "Namespace and labels", + option: &kubernetes.DeleteAllOfOptions{ + Namespace: "other-ns", + HasLabels: map[string]string{ + "label2": "value2", + }, + }, + in: &kubernetes.DeleteAllOfOptions{ + HasLabels: map[string]string{ + "label": "value", + }, + Namespace: "ns", + }, + want: &kubernetes.DeleteAllOfOptions{ + HasLabels: map[string]string{ + "label2": "value2", + }, + Namespace: "other-ns", + }, + }, + { + name: "empty not nil labels", + option: &kubernetes.DeleteAllOfOptions{ + HasLabels: map[string]string{}, + }, + in: &kubernetes.DeleteAllOfOptions{ + HasLabels: map[string]string{ + "label": "value", + }, + Namespace: "ns", + }, + want: &kubernetes.DeleteAllOfOptions{ + HasLabels: map[string]string{}, + Namespace: "ns", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + tt.option.ApplyToDeleteAllOf(tt.in) + g.Expect(tt.in).To(BeComparableTo(tt.want)) + }) + } +} diff --git a/pkg/clients/kubernetes/kubeconfig.go b/pkg/clients/kubernetes/kubeconfig.go index e843e25ecb398..bd8673a5faf0c 100644 --- 
a/pkg/clients/kubernetes/kubeconfig.go +++ b/pkg/clients/kubernetes/kubeconfig.go @@ -2,18 +2,8 @@ package kubernetes import ( "context" - - "sigs.k8s.io/controller-runtime/pkg/client" ) -type Client interface { - Get(ctx context.Context, name, namespace string, obj Object) error -} - -type Object client.Object - -type ObjectList client.ObjectList - // KubeconfigClient is an authenticated kubernetes API client // it authenticates using the credentials of a kubeconfig file. type KubeconfigClient struct { @@ -33,3 +23,29 @@ func NewKubeconfigClient(client *UnAuthClient, kubeconfig string) *KubeconfigCli func (c *KubeconfigClient) Get(ctx context.Context, name, namespace string, obj Object) error { return c.client.Get(ctx, name, namespace, c.kubeconfig, obj) } + +// List retrieves list of objects. On a successful call, Items field +// in the list will be populated with the result returned from the server. +func (c *KubeconfigClient) List(ctx context.Context, list ObjectList) error { + return c.client.List(ctx, c.kubeconfig, list) +} + +// Create saves the object obj in the Kubernetes cluster. +func (c *KubeconfigClient) Create(ctx context.Context, obj Object) error { + return c.client.Create(ctx, c.kubeconfig, obj) +} + +// Update updates the given obj in the Kubernetes cluster. +func (c *KubeconfigClient) Update(ctx context.Context, obj Object) error { + return c.client.Update(ctx, c.kubeconfig, obj) +} + +// Delete deletes the given obj from Kubernetes cluster. +func (c *KubeconfigClient) Delete(ctx context.Context, obj Object) error { + return c.client.Delete(ctx, c.kubeconfig, obj) +} + +// DeleteAllOf deletes all objects of the given type matching the given options. +func (c *KubeconfigClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + return c.client.DeleteAllOf(ctx, c.kubeconfig, obj, opts...) 
+} diff --git a/pkg/clients/kubernetes/kubeconfig_test.go b/pkg/clients/kubernetes/kubeconfig_test.go index 9a23da96858ba..a78e99fa29933 100644 --- a/pkg/clients/kubernetes/kubeconfig_test.go +++ b/pkg/clients/kubernetes/kubeconfig_test.go @@ -6,6 +6,8 @@ import ( "github.com/golang/mock/gomock" . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/clients/kubernetes" @@ -16,7 +18,7 @@ func TestKubeconfigClientGet(t *testing.T) { g := NewWithT(t) ctx := context.Background() ctrl := gomock.NewController(t) - kubectl := mocks.NewMockKubectlGetter(ctrl) + kubectl := mocks.NewMockKubectl(ctrl) kubeconfig := "k.kubeconfig" name := "eksa cluster" @@ -24,11 +26,126 @@ func TestKubeconfigClientGet(t *testing.T) { obj := &anywherev1.Cluster{} wantResourceType := "Cluster.v1alpha1.anywhere.eks.amazonaws.com" - kubectl.EXPECT().GetObject(ctx, wantResourceType, name, namespace, kubeconfig, obj) + kubectl.EXPECT().Get( + ctx, wantResourceType, kubeconfig, obj, + &kubernetes.KubectlGetOptions{Name: name, Namespace: namespace}, + ) c := kubernetes.NewUnAuthClient(kubectl) g.Expect(c.Init()).To(Succeed()) - kc := c.KubeconfigClient(kubeconfig) + kc, err := c.BuildClientFromKubeconfig(kubeconfig) + g.Expect(err).NotTo(HaveOccurred()) g.Expect(kc.Get(ctx, name, namespace, obj)).To(Succeed()) } + +func TestKubeconfigClientList(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + ctrl := gomock.NewController(t) + kubectl := mocks.NewMockKubectl(ctrl) + kubeconfig := "k.kubeconfig" + list := &corev1.NodeList{} + + kubectl.EXPECT().Get(ctx, "Node", kubeconfig, list) + + c := kubernetes.NewUnAuthClient(kubectl) + g.Expect(c.Init()).To(Succeed()) + kc, err := c.BuildClientFromKubeconfig(kubeconfig) + g.Expect(err).NotTo(HaveOccurred()) + + g.Expect(kc.List(ctx, list)).To(Succeed()) +} + +func TestKubeconfigClientCreate(t *testing.T) { 
+ g := NewWithT(t) + ctx := context.Background() + ctrl := gomock.NewController(t) + kubectl := mocks.NewMockKubectl(ctrl) + kubeconfig := "k.kubeconfig" + obj := &corev1.Pod{} + + kubectl.EXPECT().Create(ctx, kubeconfig, obj) + + c := kubernetes.NewUnAuthClient(kubectl) + g.Expect(c.Init()).To(Succeed()) + kc, err := c.BuildClientFromKubeconfig(kubeconfig) + g.Expect(err).NotTo(HaveOccurred()) + + g.Expect(kc.Create(ctx, obj)).To(Succeed()) +} + +func TestKubeconfigClientUpdate(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + ctrl := gomock.NewController(t) + kubectl := mocks.NewMockKubectl(ctrl) + kubeconfig := "k.kubeconfig" + obj := &corev1.Pod{} + + kubectl.EXPECT().Replace(ctx, kubeconfig, obj) + + c := kubernetes.NewUnAuthClient(kubectl) + g.Expect(c.Init()).To(Succeed()) + kc, err := c.BuildClientFromKubeconfig(kubeconfig) + g.Expect(err).NotTo(HaveOccurred()) + + g.Expect(kc.Update(ctx, obj)).To(Succeed()) +} + +func TestKubeconfigClientDelete(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + ctrl := gomock.NewController(t) + kubectl := mocks.NewMockKubectl(ctrl) + kubeconfig := "k.kubeconfig" + obj := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-pod", + Namespace: "my-ns", + }, + } + + opts := &kubernetes.KubectlDeleteOptions{ + Name: "my-pod", + Namespace: "my-ns", + } + kubectl.EXPECT().Delete(ctx, "Pod", kubeconfig, opts) + + c := kubernetes.NewUnAuthClient(kubectl) + g.Expect(c.Init()).To(Succeed()) + kc, err := c.BuildClientFromKubeconfig(kubeconfig) + g.Expect(err).NotTo(HaveOccurred()) + + g.Expect(kc.Delete(ctx, obj)).To(Succeed()) +} + +func TestKubeconfigClientDeleteAllOf(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + ctrl := gomock.NewController(t) + kubectl := mocks.NewMockKubectl(ctrl) + kubeconfig := "k.kubeconfig" + obj := &corev1.Pod{} + + kubectlOpts := &kubernetes.KubectlDeleteOptions{ + Namespace: "my-ns", + HasLabels: map[string]string{ + "k": "v", + }, + } + 
kubectl.EXPECT().Delete(ctx, "Pod", kubeconfig, kubectlOpts) + + c := kubernetes.NewUnAuthClient(kubectl) + g.Expect(c.Init()).To(Succeed()) + kc, err := c.BuildClientFromKubeconfig(kubeconfig) + g.Expect(err).NotTo(HaveOccurred()) + + deleteOpts := &kubernetes.DeleteAllOfOptions{ + Namespace: "my-ns", + HasLabels: map[string]string{ + "k": "v", + }, + } + g.Expect(kc.DeleteAllOf(ctx, obj, deleteOpts)).To(Succeed()) +} diff --git a/pkg/clients/kubernetes/kubectl.go b/pkg/clients/kubernetes/kubectl.go new file mode 100644 index 0000000000000..005eaf9239946 --- /dev/null +++ b/pkg/clients/kubernetes/kubectl.go @@ -0,0 +1,89 @@ +package kubernetes + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" +) + +// Kubectl is a client implemented with the kubectl binary. +type Kubectl interface { + Get(ctx context.Context, resourceType, kubeconfig string, obj runtime.Object, opts ...KubectlGetOption) error + Create(ctx context.Context, kubeconfig string, obj runtime.Object) error + Replace(ctx context.Context, kubeconfig string, obj runtime.Object) error + Apply(ctx context.Context, kubeconfig string, obj runtime.Object) error + Delete(ctx context.Context, resourceType, kubeconfig string, opts ...KubectlDeleteOption) error +} + +// KubectlGetOption is some configuration that modifies options for a get request. +type KubectlGetOption interface { + // ApplyToGet applies this configuration to the given get options. + ApplyToGet(*KubectlGetOptions) +} + +// KubectlGetOptions contains options for get commands. +type KubectlGetOptions struct { + // Name specifies the name of a resource. If set, only one single resource + // will be returned (at most). If set, Namespace is required. + Name string + + // Namespace specifies the namespace to retrieve objects from. If not set, + // all namespaces will be used. + Namespace string + + // ClusterScoped identifies the resourced as no namespaced. This is mutually exclusive with + // Namespace and requires to also specify a Name. 
+ ClusterScoped *bool +} + +var _ KubectlGetOption = &KubectlGetOptions{} + +// ApplyToGet applies this configuration to the given get options. +func (o *KubectlGetOptions) ApplyToGet(kgo *KubectlGetOptions) { + if o.Name != "" { + kgo.Name = o.Name + } + if o.Namespace != "" { + kgo.Namespace = o.Namespace + } + if o.ClusterScoped != nil { + kgo.ClusterScoped = o.ClusterScoped + } +} + +// KubectlDeleteOption is some configuration that modifies options for a get request. +type KubectlDeleteOption interface { + // ApplyToDelete applies this configuration to the given delete options. + ApplyToDelete(*KubectlDeleteOptions) +} + +// KubectlDeleteOptions contains options for delete commands. +type KubectlDeleteOptions struct { + // Name specifies the name of a resource. Use to delete a single resource. + // If set, Namespace is required. + Name string + + // Namespace specifies the namespace to delete objects from. If not set, + // all namespaces will be used. + Namespace string + + // HasLabels applies a filter using labels to the objects to be deleted. + // When multiple label-value pairs are specified, the condition is an AND + // for all of them. If specified, Name should be empty. + HasLabels map[string]string +} + +var _ KubectlDeleteOption = &KubectlDeleteOptions{} + +// ApplyToDelete applies this configuration to the given delete options. +func (o *KubectlDeleteOptions) ApplyToDelete(kdo *KubectlDeleteOptions) { + if o.Name != "" { + kdo.Name = o.Name + } + if o.Namespace != "" { + kdo.Namespace = o.Namespace + } + if o.HasLabels != nil { + kdo.HasLabels = o.HasLabels + } +} diff --git a/pkg/clients/kubernetes/kubectl_test.go b/pkg/clients/kubernetes/kubectl_test.go new file mode 100644 index 0000000000000..5b868fde39eb8 --- /dev/null +++ b/pkg/clients/kubernetes/kubectl_test.go @@ -0,0 +1,181 @@ +package kubernetes_test + +import ( + "testing" + + . 
"github.com/onsi/gomega" + + "github.com/aws/eks-anywhere/pkg/clients/kubernetes" + "github.com/aws/eks-anywhere/pkg/utils/ptr" +) + +func TestKubectlGetOptionsApplyToGet(t *testing.T) { + tests := []struct { + name string + option, in, want *kubernetes.KubectlGetOptions + }{ + { + name: "empty", + option: &kubernetes.KubectlGetOptions{}, + in: &kubernetes.KubectlGetOptions{ + Name: "my-name", + Namespace: "ns", + }, + want: &kubernetes.KubectlGetOptions{ + Name: "my-name", + Namespace: "ns", + }, + }, + { + name: "only Namespace", + option: &kubernetes.KubectlGetOptions{ + Namespace: "other-ns", + }, + in: &kubernetes.KubectlGetOptions{ + Name: "my-name", + Namespace: "ns", + }, + want: &kubernetes.KubectlGetOptions{ + Name: "my-name", + Namespace: "other-ns", + }, + }, + { + name: "Namespace and Name", + option: &kubernetes.KubectlGetOptions{ + Name: "my-other-name", + Namespace: "other-ns", + }, + in: &kubernetes.KubectlGetOptions{ + Name: "my-name", + Namespace: "ns", + }, + want: &kubernetes.KubectlGetOptions{ + Name: "my-other-name", + Namespace: "other-ns", + }, + }, + { + name: "Name and ClusterScope", + option: &kubernetes.KubectlGetOptions{ + Name: "my-other-name", + ClusterScoped: ptr.Bool(true), + }, + in: &kubernetes.KubectlGetOptions{ + Name: "my-name", + }, + want: &kubernetes.KubectlGetOptions{ + Name: "my-other-name", + ClusterScoped: ptr.Bool(true), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + tt.option.ApplyToGet(tt.in) + g.Expect(tt.in).To(BeComparableTo(tt.want)) + }) + } +} + +func TestKubectlDeleteOptionsApplyToDelete(t *testing.T) { + tests := []struct { + name string + option, in, want *kubernetes.KubectlDeleteOptions + }{ + { + name: "empty", + option: &kubernetes.KubectlDeleteOptions{}, + in: &kubernetes.KubectlDeleteOptions{ + HasLabels: map[string]string{ + "label": "value", + }, + Namespace: "ns", + }, + want: &kubernetes.KubectlDeleteOptions{ + HasLabels: map[string]string{ + 
"label": "value", + }, + Namespace: "ns", + }, + }, + { + name: "only Namespace", + option: &kubernetes.KubectlDeleteOptions{ + Namespace: "other-ns", + }, + in: &kubernetes.KubectlDeleteOptions{ + HasLabels: map[string]string{ + "label": "value", + }, + Namespace: "ns", + }, + want: &kubernetes.KubectlDeleteOptions{ + HasLabels: map[string]string{ + "label": "value", + }, + Namespace: "other-ns", + }, + }, + { + name: "Namespace and labels", + option: &kubernetes.KubectlDeleteOptions{ + Namespace: "other-ns", + HasLabels: map[string]string{ + "label2": "value2", + }, + }, + in: &kubernetes.KubectlDeleteOptions{ + HasLabels: map[string]string{ + "label": "value", + }, + Namespace: "ns", + }, + want: &kubernetes.KubectlDeleteOptions{ + HasLabels: map[string]string{ + "label2": "value2", + }, + Namespace: "other-ns", + }, + }, + { + name: "empty not nil labels", + option: &kubernetes.KubectlDeleteOptions{ + HasLabels: map[string]string{}, + }, + in: &kubernetes.KubectlDeleteOptions{ + HasLabels: map[string]string{ + "label": "value", + }, + Namespace: "ns", + }, + want: &kubernetes.KubectlDeleteOptions{ + HasLabels: map[string]string{}, + Namespace: "ns", + }, + }, + { + name: "Namespace and Name", + option: &kubernetes.KubectlDeleteOptions{ + Name: "my-other-name", + Namespace: "other-ns", + }, + in: &kubernetes.KubectlDeleteOptions{ + Name: "my-name", + Namespace: "ns", + }, + want: &kubernetes.KubectlDeleteOptions{ + Name: "my-other-name", + Namespace: "other-ns", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + tt.option.ApplyToDelete(tt.in) + g.Expect(tt.in).To(BeComparableTo(tt.want)) + }) + } +} diff --git a/pkg/clients/kubernetes/mocks/client.go b/pkg/clients/kubernetes/mocks/client.go new file mode 100644 index 0000000000000..64728c28d1768 --- /dev/null +++ b/pkg/clients/kubernetes/mocks/client.go @@ -0,0 +1,295 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: pkg/clients/kubernetes/client.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + kubernetes "github.com/aws/eks-anywhere/pkg/clients/kubernetes" + gomock "github.com/golang/mock/gomock" +) + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// Create mocks base method. +func (m *MockClient) Create(ctx context.Context, obj kubernetes.Object) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Create", ctx, obj) + ret0, _ := ret[0].(error) + return ret0 +} + +// Create indicates an expected call of Create. +func (mr *MockClientMockRecorder) Create(ctx, obj interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockClient)(nil).Create), ctx, obj) +} + +// Delete mocks base method. +func (m *MockClient) Delete(ctx context.Context, obj kubernetes.Object) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, obj) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockClientMockRecorder) Delete(ctx, obj interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockClient)(nil).Delete), ctx, obj) +} + +// DeleteAllOf mocks base method. 
+func (m *MockClient) DeleteAllOf(ctx context.Context, obj kubernetes.Object, opts ...kubernetes.DeleteAllOfOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, obj} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteAllOf", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteAllOf indicates an expected call of DeleteAllOf. +func (mr *MockClientMockRecorder) DeleteAllOf(ctx, obj interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, obj}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllOf", reflect.TypeOf((*MockClient)(nil).DeleteAllOf), varargs...) +} + +// Get mocks base method. +func (m *MockClient) Get(ctx context.Context, name, namespace string, obj kubernetes.Object) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, name, namespace, obj) + ret0, _ := ret[0].(error) + return ret0 +} + +// Get indicates an expected call of Get. +func (mr *MockClientMockRecorder) Get(ctx, name, namespace, obj interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockClient)(nil).Get), ctx, name, namespace, obj) +} + +// List mocks base method. +func (m *MockClient) List(ctx context.Context, list kubernetes.ObjectList) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", ctx, list) + ret0, _ := ret[0].(error) + return ret0 +} + +// List indicates an expected call of List. +func (mr *MockClientMockRecorder) List(ctx, list interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockClient)(nil).List), ctx, list) +} + +// Update mocks base method. 
+func (m *MockClient) Update(ctx context.Context, obj kubernetes.Object) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Update", ctx, obj) + ret0, _ := ret[0].(error) + return ret0 +} + +// Update indicates an expected call of Update. +func (mr *MockClientMockRecorder) Update(ctx, obj interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockClient)(nil).Update), ctx, obj) +} + +// MockReader is a mock of Reader interface. +type MockReader struct { + ctrl *gomock.Controller + recorder *MockReaderMockRecorder +} + +// MockReaderMockRecorder is the mock recorder for MockReader. +type MockReaderMockRecorder struct { + mock *MockReader +} + +// NewMockReader creates a new mock instance. +func NewMockReader(ctrl *gomock.Controller) *MockReader { + mock := &MockReader{ctrl: ctrl} + mock.recorder = &MockReaderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockReader) EXPECT() *MockReaderMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockReader) Get(ctx context.Context, name, namespace string, obj kubernetes.Object) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, name, namespace, obj) + ret0, _ := ret[0].(error) + return ret0 +} + +// Get indicates an expected call of Get. +func (mr *MockReaderMockRecorder) Get(ctx, name, namespace, obj interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockReader)(nil).Get), ctx, name, namespace, obj) +} + +// List mocks base method. +func (m *MockReader) List(ctx context.Context, list kubernetes.ObjectList) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", ctx, list) + ret0, _ := ret[0].(error) + return ret0 +} + +// List indicates an expected call of List. 
+func (mr *MockReaderMockRecorder) List(ctx, list interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockReader)(nil).List), ctx, list) +} + +// MockWriter is a mock of Writer interface. +type MockWriter struct { + ctrl *gomock.Controller + recorder *MockWriterMockRecorder +} + +// MockWriterMockRecorder is the mock recorder for MockWriter. +type MockWriterMockRecorder struct { + mock *MockWriter +} + +// NewMockWriter creates a new mock instance. +func NewMockWriter(ctrl *gomock.Controller) *MockWriter { + mock := &MockWriter{ctrl: ctrl} + mock.recorder = &MockWriterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockWriter) EXPECT() *MockWriterMockRecorder { + return m.recorder +} + +// Create mocks base method. +func (m *MockWriter) Create(ctx context.Context, obj kubernetes.Object) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Create", ctx, obj) + ret0, _ := ret[0].(error) + return ret0 +} + +// Create indicates an expected call of Create. +func (mr *MockWriterMockRecorder) Create(ctx, obj interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockWriter)(nil).Create), ctx, obj) +} + +// Delete mocks base method. +func (m *MockWriter) Delete(ctx context.Context, obj kubernetes.Object) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, obj) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockWriterMockRecorder) Delete(ctx, obj interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockWriter)(nil).Delete), ctx, obj) +} + +// DeleteAllOf mocks base method. 
+func (m *MockWriter) DeleteAllOf(ctx context.Context, obj kubernetes.Object, opts ...kubernetes.DeleteAllOfOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, obj} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteAllOf", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteAllOf indicates an expected call of DeleteAllOf. +func (mr *MockWriterMockRecorder) DeleteAllOf(ctx, obj interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, obj}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllOf", reflect.TypeOf((*MockWriter)(nil).DeleteAllOf), varargs...) +} + +// Update mocks base method. +func (m *MockWriter) Update(ctx context.Context, obj kubernetes.Object) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Update", ctx, obj) + ret0, _ := ret[0].(error) + return ret0 +} + +// Update indicates an expected call of Update. +func (mr *MockWriterMockRecorder) Update(ctx, obj interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockWriter)(nil).Update), ctx, obj) +} + +// MockDeleteAllOfOption is a mock of DeleteAllOfOption interface. +type MockDeleteAllOfOption struct { + ctrl *gomock.Controller + recorder *MockDeleteAllOfOptionMockRecorder +} + +// MockDeleteAllOfOptionMockRecorder is the mock recorder for MockDeleteAllOfOption. +type MockDeleteAllOfOptionMockRecorder struct { + mock *MockDeleteAllOfOption +} + +// NewMockDeleteAllOfOption creates a new mock instance. +func NewMockDeleteAllOfOption(ctrl *gomock.Controller) *MockDeleteAllOfOption { + mock := &MockDeleteAllOfOption{ctrl: ctrl} + mock.recorder = &MockDeleteAllOfOptionMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockDeleteAllOfOption) EXPECT() *MockDeleteAllOfOptionMockRecorder { + return m.recorder +} + +// ApplyToDeleteAllOf mocks base method. +func (m *MockDeleteAllOfOption) ApplyToDeleteAllOf(arg0 *kubernetes.DeleteAllOfOptions) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ApplyToDeleteAllOf", arg0) +} + +// ApplyToDeleteAllOf indicates an expected call of ApplyToDeleteAllOf. +func (mr *MockDeleteAllOfOptionMockRecorder) ApplyToDeleteAllOf(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyToDeleteAllOf", reflect.TypeOf((*MockDeleteAllOfOption)(nil).ApplyToDeleteAllOf), arg0) +} diff --git a/pkg/clients/kubernetes/mocks/kubeconfig.go b/pkg/clients/kubernetes/mocks/kubeconfig.go index 75977fbec6692..f75e4b8d7c72f 100644 --- a/pkg/clients/kubernetes/mocks/kubeconfig.go +++ b/pkg/clients/kubernetes/mocks/kubeconfig.go @@ -3,48 +3,3 @@ // Package mocks is a generated GoMock package. package mocks - -import ( - context "context" - reflect "reflect" - - kubernetes "github.com/aws/eks-anywhere/pkg/clients/kubernetes" - gomock "github.com/golang/mock/gomock" -) - -// MockClient is a mock of Client interface. -type MockClient struct { - ctrl *gomock.Controller - recorder *MockClientMockRecorder -} - -// MockClientMockRecorder is the mock recorder for MockClient. -type MockClientMockRecorder struct { - mock *MockClient -} - -// NewMockClient creates a new mock instance. -func NewMockClient(ctrl *gomock.Controller) *MockClient { - mock := &MockClient{ctrl: ctrl} - mock.recorder = &MockClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockClient) EXPECT() *MockClientMockRecorder { - return m.recorder -} - -// Get mocks base method. 
-func (m *MockClient) Get(ctx context.Context, name, namespace string, obj kubernetes.Object) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", ctx, name, namespace, obj) - ret0, _ := ret[0].(error) - return ret0 -} - -// Get indicates an expected call of Get. -func (mr *MockClientMockRecorder) Get(ctx, name, namespace, obj interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockClient)(nil).Get), ctx, name, namespace, obj) -} diff --git a/pkg/clients/kubernetes/mocks/kubectl.go b/pkg/clients/kubernetes/mocks/kubectl.go index 4f86e4324d390..eb8efe6a4e445 100644 --- a/pkg/clients/kubernetes/mocks/kubectl.go +++ b/pkg/clients/kubernetes/mocks/kubectl.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: pkg/clients/kubernetes/unauth.go +// Source: pkg/clients/kubernetes/kubectl.go // Package mocks is a generated GoMock package. package mocks @@ -8,35 +8,36 @@ import ( context "context" reflect "reflect" + kubernetes "github.com/aws/eks-anywhere/pkg/clients/kubernetes" gomock "github.com/golang/mock/gomock" runtime "k8s.io/apimachinery/pkg/runtime" ) -// MockKubectlGetter is a mock of KubectlGetter interface. -type MockKubectlGetter struct { +// MockKubectl is a mock of Kubectl interface. +type MockKubectl struct { ctrl *gomock.Controller - recorder *MockKubectlGetterMockRecorder + recorder *MockKubectlMockRecorder } -// MockKubectlGetterMockRecorder is the mock recorder for MockKubectlGetter. -type MockKubectlGetterMockRecorder struct { - mock *MockKubectlGetter +// MockKubectlMockRecorder is the mock recorder for MockKubectl. +type MockKubectlMockRecorder struct { + mock *MockKubectl } -// NewMockKubectlGetter creates a new mock instance. -func NewMockKubectlGetter(ctrl *gomock.Controller) *MockKubectlGetter { - mock := &MockKubectlGetter{ctrl: ctrl} - mock.recorder = &MockKubectlGetterMockRecorder{mock} +// NewMockKubectl creates a new mock instance. 
+func NewMockKubectl(ctrl *gomock.Controller) *MockKubectl { + mock := &MockKubectl{ctrl: ctrl} + mock.recorder = &MockKubectlMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockKubectlGetter) EXPECT() *MockKubectlGetterMockRecorder { +func (m *MockKubectl) EXPECT() *MockKubectlMockRecorder { return m.recorder } // Apply mocks base method. -func (m *MockKubectlGetter) Apply(ctx context.Context, kubeconfig string, obj runtime.Object) error { +func (m *MockKubectl) Apply(ctx context.Context, kubeconfig string, obj runtime.Object) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Apply", ctx, kubeconfig, obj) ret0, _ := ret[0].(error) @@ -44,35 +45,143 @@ func (m *MockKubectlGetter) Apply(ctx context.Context, kubeconfig string, obj ru } // Apply indicates an expected call of Apply. -func (mr *MockKubectlGetterMockRecorder) Apply(ctx, kubeconfig, obj interface{}) *gomock.Call { +func (mr *MockKubectlMockRecorder) Apply(ctx, kubeconfig, obj interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockKubectlGetter)(nil).Apply), ctx, kubeconfig, obj) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockKubectl)(nil).Apply), ctx, kubeconfig, obj) +} + +// Create mocks base method. +func (m *MockKubectl) Create(ctx context.Context, kubeconfig string, obj runtime.Object) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Create", ctx, kubeconfig, obj) + ret0, _ := ret[0].(error) + return ret0 +} + +// Create indicates an expected call of Create. +func (mr *MockKubectlMockRecorder) Create(ctx, kubeconfig, obj interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockKubectl)(nil).Create), ctx, kubeconfig, obj) } // Delete mocks base method. 
-func (m *MockKubectlGetter) Delete(ctx context.Context, resourceType, name, namespace, kubeconfig string) error { +func (m *MockKubectl) Delete(ctx context.Context, resourceType, kubeconfig string, opts ...kubernetes.KubectlDeleteOption) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", ctx, resourceType, name, namespace, kubeconfig) + varargs := []interface{}{ctx, resourceType, kubeconfig} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Delete", varargs...) ret0, _ := ret[0].(error) return ret0 } // Delete indicates an expected call of Delete. -func (mr *MockKubectlGetterMockRecorder) Delete(ctx, resourceType, name, namespace, kubeconfig interface{}) *gomock.Call { +func (mr *MockKubectlMockRecorder) Delete(ctx, resourceType, kubeconfig interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, resourceType, kubeconfig}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKubectl)(nil).Delete), varargs...) +} + +// Get mocks base method. +func (m *MockKubectl) Get(ctx context.Context, resourceType, kubeconfig string, obj runtime.Object, opts ...kubernetes.KubectlGetOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, resourceType, kubeconfig, obj} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Get", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// Get indicates an expected call of Get. +func (mr *MockKubectlMockRecorder) Get(ctx, resourceType, kubeconfig, obj interface{}, opts ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKubectlGetter)(nil).Delete), ctx, resourceType, name, namespace, kubeconfig) + varargs := append([]interface{}{ctx, resourceType, kubeconfig, obj}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockKubectl)(nil).Get), varargs...) } -// GetObject mocks base method. -func (m *MockKubectlGetter) GetObject(ctx context.Context, resourceType, name, namespace, kubeconfig string, obj runtime.Object) error { +// Replace mocks base method. +func (m *MockKubectl) Replace(ctx context.Context, kubeconfig string, obj runtime.Object) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObject", ctx, resourceType, name, namespace, kubeconfig, obj) + ret := m.ctrl.Call(m, "Replace", ctx, kubeconfig, obj) ret0, _ := ret[0].(error) return ret0 } -// GetObject indicates an expected call of GetObject. -func (mr *MockKubectlGetterMockRecorder) GetObject(ctx, resourceType, name, namespace, kubeconfig, obj interface{}) *gomock.Call { +// Replace indicates an expected call of Replace. +func (mr *MockKubectlMockRecorder) Replace(ctx, kubeconfig, obj interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Replace", reflect.TypeOf((*MockKubectl)(nil).Replace), ctx, kubeconfig, obj) +} + +// MockKubectlGetOption is a mock of KubectlGetOption interface. +type MockKubectlGetOption struct { + ctrl *gomock.Controller + recorder *MockKubectlGetOptionMockRecorder +} + +// MockKubectlGetOptionMockRecorder is the mock recorder for MockKubectlGetOption. +type MockKubectlGetOptionMockRecorder struct { + mock *MockKubectlGetOption +} + +// NewMockKubectlGetOption creates a new mock instance. +func NewMockKubectlGetOption(ctrl *gomock.Controller) *MockKubectlGetOption { + mock := &MockKubectlGetOption{ctrl: ctrl} + mock.recorder = &MockKubectlGetOptionMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockKubectlGetOption) EXPECT() *MockKubectlGetOptionMockRecorder { + return m.recorder +} + +// ApplyToGet mocks base method. 
+func (m *MockKubectlGetOption) ApplyToGet(arg0 *kubernetes.KubectlGetOptions) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ApplyToGet", arg0) +} + +// ApplyToGet indicates an expected call of ApplyToGet. +func (mr *MockKubectlGetOptionMockRecorder) ApplyToGet(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyToGet", reflect.TypeOf((*MockKubectlGetOption)(nil).ApplyToGet), arg0) +} + +// MockKubectlDeleteOption is a mock of KubectlDeleteOption interface. +type MockKubectlDeleteOption struct { + ctrl *gomock.Controller + recorder *MockKubectlDeleteOptionMockRecorder +} + +// MockKubectlDeleteOptionMockRecorder is the mock recorder for MockKubectlDeleteOption. +type MockKubectlDeleteOptionMockRecorder struct { + mock *MockKubectlDeleteOption +} + +// NewMockKubectlDeleteOption creates a new mock instance. +func NewMockKubectlDeleteOption(ctrl *gomock.Controller) *MockKubectlDeleteOption { + mock := &MockKubectlDeleteOption{ctrl: ctrl} + mock.recorder = &MockKubectlDeleteOptionMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockKubectlDeleteOption) EXPECT() *MockKubectlDeleteOptionMockRecorder { + return m.recorder +} + +// ApplyToDelete mocks base method. +func (m *MockKubectlDeleteOption) ApplyToDelete(arg0 *kubernetes.KubectlDeleteOptions) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ApplyToDelete", arg0) +} + +// ApplyToDelete indicates an expected call of ApplyToDelete. 
+func (mr *MockKubectlDeleteOptionMockRecorder) ApplyToDelete(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObject", reflect.TypeOf((*MockKubectlGetter)(nil).GetObject), ctx, resourceType, name, namespace, kubeconfig, obj) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyToDelete", reflect.TypeOf((*MockKubectlDeleteOption)(nil).ApplyToDelete), arg0) } diff --git a/pkg/clients/kubernetes/scheme.go b/pkg/clients/kubernetes/scheme.go index 72199bdf3fb6e..701d3d7de6134 100644 --- a/pkg/clients/kubernetes/scheme.go +++ b/pkg/clients/kubernetes/scheme.go @@ -5,6 +5,7 @@ import ( etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" tinkerbellv1 "github.com/tinkerbell/cluster-api-provider-tinkerbell/api/v1beta1" "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" cloudstackv1 "sigs.k8s.io/cluster-api-provider-cloudstack/api/v1beta2" vspherev1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -21,6 +22,8 @@ import ( type schemeAdder func(s *runtime.Scheme) error var schemeAdders = []schemeAdder{ + // clientgoscheme adds all the native K8s kinds + clientgoscheme.AddToScheme, clusterv1.AddToScheme, controlplanev1.AddToScheme, anywherev1.AddToScheme, diff --git a/pkg/clients/kubernetes/unauth.go b/pkg/clients/kubernetes/unauth.go index 212da1887cf77..1dd70636ff369 100644 --- a/pkg/clients/kubernetes/unauth.go +++ b/pkg/clients/kubernetes/unauth.go @@ -3,26 +3,23 @@ package kubernetes import ( "context" "fmt" + "strings" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -type KubectlGetter interface { - GetObject(ctx context.Context, resourceType, name, namespace, kubeconfig string, obj runtime.Object) error - Delete(ctx context.Context, resourceType, name, namespace, kubeconfig string) 
error - Apply(ctx context.Context, kubeconfig string, obj runtime.Object) error -} - // UnAuthClient is a generic kubernetes API client that takes a kubeconfig // file on every call in order to authenticate. type UnAuthClient struct { - kubectl KubectlGetter + kubectl Kubectl scheme *runtime.Scheme } -func NewUnAuthClient(kubectl KubectlGetter) *UnAuthClient { +// NewUnAuthClient builds a new UnAuthClient. +func NewUnAuthClient(kubectl Kubectl) *UnAuthClient { return &UnAuthClient{ kubectl: kubectl, scheme: runtime.NewScheme(), @@ -44,7 +41,7 @@ func (c *UnAuthClient) Get(ctx context.Context, name, namespace, kubeconfig stri return fmt.Errorf("getting kubernetes resource: %v", err) } - return c.kubectl.GetObject(ctx, resourceType, name, namespace, kubeconfig, obj) + return c.kubectl.Get(ctx, resourceType, kubeconfig, obj, &KubectlGetOptions{Name: name, Namespace: namespace}) } // KubeconfigClient returns an equivalent authenticated client. @@ -52,18 +49,69 @@ func (c *UnAuthClient) KubeconfigClient(kubeconfig string) Client { return NewKubeconfigClient(c, kubeconfig) } -// Delete performs a DELETE call to the kube API server authenticating with a kubeconfig file. -func (c *UnAuthClient) Delete(ctx context.Context, name, namespace, kubeconfig string, obj runtime.Object) error { +// BuildClientFromKubeconfig returns an equivalent authenticated client. It will never return +// an error but this helps satisfy a generic factory interface where errors are possible. It's +// basically an alias to KubeconfigClient. +func (c *UnAuthClient) BuildClientFromKubeconfig(kubeconfig string) (Client, error) { + return c.KubeconfigClient(kubeconfig), nil +} + +// Apply performs an upsert in the form of a client-side apply. +func (c *UnAuthClient) Apply(ctx context.Context, kubeconfig string, obj runtime.Object) error { + return c.kubectl.Apply(ctx, kubeconfig, obj) +} + +// List retrieves list of objects. 
On a successful call, Items field +// in the list will be populated with the result returned from the server. +func (c *UnAuthClient) List(ctx context.Context, kubeconfig string, list ObjectList) error { + resourceType, err := c.resourceTypeForObj(list) + if err != nil { + return fmt.Errorf("getting kubernetes resource: %v", err) + } + + return c.kubectl.Get(ctx, resourceType, kubeconfig, list) +} + +// Create saves the object obj in the Kubernetes cluster. +func (c *UnAuthClient) Create(ctx context.Context, kubeconfig string, obj Object) error { + return c.kubectl.Create(ctx, kubeconfig, obj) +} + +// Update updates the given obj in the Kubernetes cluster. +func (c *UnAuthClient) Update(ctx context.Context, kubeconfig string, obj Object) error { + return c.kubectl.Replace(ctx, kubeconfig, obj) +} + +// Delete deletes the given obj from Kubernetes cluster. +func (c *UnAuthClient) Delete(ctx context.Context, kubeconfig string, obj Object) error { resourceType, err := c.resourceTypeForObj(obj) if err != nil { return fmt.Errorf("deleting kubernetes resource: %v", err) } - return c.kubectl.Delete(ctx, resourceType, name, namespace, kubeconfig) + o := &KubectlDeleteOptions{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } + return c.kubectl.Delete(ctx, resourceType, kubeconfig, o) } -func (c *UnAuthClient) Apply(ctx context.Context, kubeconfig string, obj runtime.Object) error { - return c.kubectl.Apply(ctx, kubeconfig, obj) +// DeleteAllOf deletes all objects of the given type matching the given options. 
+func (c *UnAuthClient) DeleteAllOf(ctx context.Context, kubeconfig string, obj Object, opts ...DeleteAllOfOption) error {
+	resourceType, err := c.resourceTypeForObj(obj)
+	if err != nil {
+		return fmt.Errorf("deleting kubernetes resource: %v", err)
+	}
+
+	deleteAllOpts := &DeleteAllOfOptions{}
+	for _, opt := range opts {
+		opt.ApplyToDeleteAllOf(deleteAllOpts)
+	}
+
+	o := &KubectlDeleteOptions{}
+	o.Namespace = deleteAllOpts.Namespace
+	o.HasLabels = deleteAllOpts.HasLabels
+	return c.kubectl.Delete(ctx, resourceType, kubeconfig, o)
 }
 
 func (c *UnAuthClient) resourceTypeForObj(obj runtime.Object) (string, error) {
@@ -72,9 +120,19 @@ func (c *UnAuthClient) resourceTypeForObj(obj runtime.Object) (string, error) {
 		return "", err
 	}
 
+	if meta.IsListType(obj) && strings.HasSuffix(groupVersionKind.Kind, "List") {
+		// if obj is a list, treat it as a request for the "individual" item's resource
+		groupVersionKind.Kind = groupVersionKind.Kind[:len(groupVersionKind.Kind)-4]
+	}
+
 	return groupVersionToKubectlResourceType(groupVersionKind), nil
 }
 
 func groupVersionToKubectlResourceType(g schema.GroupVersionKind) string {
+	if g.Group == "" {
+		// if Group is not set, this is probably an obj from "core", whose API group is just v1
+		return g.Kind
+	}
+
 	return fmt.Sprintf("%s.%s.%s", g.Kind, g.Version, g.Group)
 }
diff --git a/pkg/clients/kubernetes/unauth_test.go b/pkg/clients/kubernetes/unauth_test.go
index c3cc15dec7ed6..a806328dfd941 100644
--- a/pkg/clients/kubernetes/unauth_test.go
+++ b/pkg/clients/kubernetes/unauth_test.go
@@ -6,10 +6,12 @@ import (
 	"github.com/golang/mock/gomock"
 	.
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" clusterapiv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/clients/kubernetes" @@ -41,16 +43,26 @@ func TestUnAuthClientGetSuccess(t *testing.T) { obj: &controlplanev1.KubeadmControlPlane{}, wantResourceType: "KubeadmControlPlane.v1beta1.controlplane.cluster.x-k8s.io", }, + { + name: "my-node", + namespace: "", + obj: &corev1.NodeList{}, + wantResourceType: "Node", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) ctx := context.Background() ctrl := gomock.NewController(t) - kubectl := mocks.NewMockKubectlGetter(ctrl) + kubectl := mocks.NewMockKubectl(ctrl) kubeconfig := "k.kubeconfig" - kubectl.EXPECT().GetObject(ctx, tt.wantResourceType, tt.name, tt.namespace, kubeconfig, tt.obj) + o := &kubernetes.KubectlGetOptions{ + Name: tt.name, + Namespace: tt.namespace, + } + kubectl.EXPECT().Get(ctx, tt.wantResourceType, kubeconfig, tt.obj, o) c := kubernetes.NewUnAuthClient(kubectl) g.Expect(c.Init()).To(Succeed()) @@ -64,7 +76,7 @@ func TestUnAuthClientGetUnknownObjType(t *testing.T) { g := NewWithT(t) ctx := context.Background() ctrl := gomock.NewController(t) - kubectl := mocks.NewMockKubectlGetter(ctrl) + kubectl := mocks.NewMockKubectl(ctrl) c := kubernetes.NewUnAuthClient(kubectl) g.Expect(c.Init()).To(Succeed()) @@ -75,20 +87,40 @@ func TestUnAuthClientGetUnknownObjType(t *testing.T) { func TestUnAuthClientDeleteSuccess(t *testing.T) { tests := []struct { name string - namespace string - obj runtime.Object + obj client.Object wantResourceType string + wantOpts []interface{} }{ { - name: "eksa cluster", - namespace: "eksa-system", - obj: &anywherev1.Cluster{}, + name: "eksa cluster", + obj: 
&anywherev1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "eksa-cluster", + Namespace: "eksa-system", + }, + }, + wantOpts: []interface{}{ + &kubernetes.KubectlDeleteOptions{ + Name: "eksa-cluster", + Namespace: "eksa-system", + }, + }, wantResourceType: "Cluster.v1alpha1.anywhere.eks.amazonaws.com", }, { - name: "capi cluster", - namespace: "eksa-system", - obj: &clusterapiv1.Cluster{}, + name: "capi cluster", + obj: &clusterapiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "capi-cluster", + Namespace: "eksa-system", + }, + }, + wantOpts: []interface{}{ + &kubernetes.KubectlDeleteOptions{ + Name: "capi-cluster", + Namespace: "eksa-system", + }, + }, wantResourceType: "Cluster.v1beta1.cluster.x-k8s.io", }, } @@ -97,15 +129,15 @@ func TestUnAuthClientDeleteSuccess(t *testing.T) { g := NewWithT(t) ctx := context.Background() ctrl := gomock.NewController(t) - kubectl := mocks.NewMockKubectlGetter(ctrl) + kubectl := mocks.NewMockKubectl(ctrl) kubeconfig := "k.kubeconfig" - kubectl.EXPECT().Delete(ctx, tt.wantResourceType, tt.name, tt.namespace, kubeconfig) + kubectl.EXPECT().Delete(ctx, tt.wantResourceType, kubeconfig, tt.wantOpts...) 
c := kubernetes.NewUnAuthClient(kubectl) g.Expect(c.Init()).To(Succeed()) - g.Expect(c.Delete(ctx, tt.name, tt.namespace, kubeconfig, tt.obj)).To(Succeed()) + g.Expect(c.Delete(ctx, kubeconfig, tt.obj)).To(Succeed()) }) } } @@ -114,7 +146,7 @@ func TestUnAuthClientApplySuccess(t *testing.T) { tests := []struct { name string namespace string - obj runtime.Object + obj client.Object }{ { name: "eksa cluster", @@ -132,7 +164,7 @@ func TestUnAuthClientApplySuccess(t *testing.T) { g := NewWithT(t) ctx := context.Background() ctrl := gomock.NewController(t) - kubectl := mocks.NewMockKubectlGetter(ctrl) + kubectl := mocks.NewMockKubectl(ctrl) kubeconfig := "k.kubeconfig" kubectl.EXPECT().Apply(ctx, kubeconfig, tt.obj) @@ -149,10 +181,151 @@ func TestUnAuthClientDeleteUnknownObjType(t *testing.T) { g := NewWithT(t) ctx := context.Background() ctrl := gomock.NewController(t) - kubectl := mocks.NewMockKubectlGetter(ctrl) + kubectl := mocks.NewMockKubectl(ctrl) + + c := kubernetes.NewUnAuthClient(kubectl) + g.Expect(c.Init()).To(Succeed()) + + g.Expect(c.Delete(ctx, "kubeconfig", &unknownType{})).Error() +} + +type unknownType struct { + metav1.TypeMeta + metav1.ObjectMeta +} + +func (*unknownType) DeepCopyObject() runtime.Object { + return nil +} + +func TestUnauthClientList(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + ctrl := gomock.NewController(t) + kubectl := mocks.NewMockKubectl(ctrl) + kubeconfig := "k.kubeconfig" + list := &corev1.NodeList{} + + kubectl.EXPECT().Get(ctx, "Node", kubeconfig, list) + + c := kubernetes.NewUnAuthClient(kubectl) + g.Expect(c.Init()).To(Succeed()) + + g.Expect(c.List(ctx, kubeconfig, list)).To(Succeed()) +} + +func TestUnauthClientCreate(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + ctrl := gomock.NewController(t) + kubectl := mocks.NewMockKubectl(ctrl) + kubeconfig := "k.kubeconfig" + obj := &corev1.Pod{} + + kubectl.EXPECT().Create(ctx, kubeconfig, obj) + + c := 
kubernetes.NewUnAuthClient(kubectl) + g.Expect(c.Init()).To(Succeed()) + + g.Expect(c.Create(ctx, kubeconfig, obj)).To(Succeed()) +} + +func TestUnauthClientUpdate(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + ctrl := gomock.NewController(t) + kubectl := mocks.NewMockKubectl(ctrl) + kubeconfig := "k.kubeconfig" + obj := &corev1.Pod{} + + kubectl.EXPECT().Replace(ctx, kubeconfig, obj) c := kubernetes.NewUnAuthClient(kubectl) g.Expect(c.Init()).To(Succeed()) - g.Expect(c.Delete(ctx, "name", "namespace", "kubeconfig", &metav1.Status{})).Error() + g.Expect(c.Update(ctx, kubeconfig, obj)).To(Succeed()) +} + +func TestUnauthClientDelete(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + ctrl := gomock.NewController(t) + kubectl := mocks.NewMockKubectl(ctrl) + kubeconfig := "k.kubeconfig" + obj := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-pod", + Namespace: "my-ns", + }, + } + + opts := &kubernetes.KubectlDeleteOptions{ + Name: "my-pod", + Namespace: "my-ns", + } + kubectl.EXPECT().Delete(ctx, "Pod", kubeconfig, opts) + + c := kubernetes.NewUnAuthClient(kubectl) + g.Expect(c.Init()).To(Succeed()) + + g.Expect(c.Delete(ctx, kubeconfig, obj)).To(Succeed()) +} + +func TestUnauthClientDeleteAllOf(t *testing.T) { + tests := []struct { + name string + opts []kubernetes.DeleteAllOfOption + wantKubectlOpt *kubernetes.KubectlDeleteOptions + }{ + { + name: "no options", + wantKubectlOpt: &kubernetes.KubectlDeleteOptions{}, + }, + { + name: "delete all in namespace", + opts: []kubernetes.DeleteAllOfOption{ + &kubernetes.DeleteAllOfOptions{ + Namespace: "my-ns", + }, + }, + wantKubectlOpt: &kubernetes.KubectlDeleteOptions{ + Namespace: "my-ns", + }, + }, + { + name: "delete all in namespace with label selector", + opts: []kubernetes.DeleteAllOfOption{ + &kubernetes.DeleteAllOfOptions{ + Namespace: "my-ns", + HasLabels: map[string]string{ + "label": "value", + }, + }, + }, + wantKubectlOpt: &kubernetes.KubectlDeleteOptions{ + 
Namespace: "my-ns", + HasLabels: map[string]string{ + "label": "value", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + ctrl := gomock.NewController(t) + kubectl := mocks.NewMockKubectl(ctrl) + kubeconfig := "k.kubeconfig" + obj := &corev1.Pod{} + + kubectl.EXPECT().Delete(ctx, "Pod", kubeconfig, tt.wantKubectlOpt) + + c := kubernetes.NewUnAuthClient(kubectl) + g.Expect(c.Init()).To(Succeed()) + + g.Expect(c.DeleteAllOf(ctx, kubeconfig, obj, tt.opts...)).To(Succeed()) + }) + } } diff --git a/pkg/clustermanager/kube_proxy.go b/pkg/clustermanager/kube_proxy.go index 22711da57ae2c..3ed586af4a619 100644 --- a/pkg/clustermanager/kube_proxy.go +++ b/pkg/clustermanager/kube_proxy.go @@ -14,9 +14,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" - "sigs.k8s.io/controller-runtime/pkg/client" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" + "github.com/aws/eks-anywhere/pkg/clients/kubernetes" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/controller" "github.com/aws/eks-anywhere/pkg/controller/clientutil" @@ -43,7 +43,7 @@ var firstEKSDWithNewKubeProxy = map[anywherev1.KubernetesVersion]int{ // ClientFactory builds Kubernetes clients. type ClientFactory interface { // BuildClientFromKubeconfig builds a Kubernetes client from a kubeconfig file. - BuildClientFromKubeconfig(kubeconfigPath string) (client.Client, error) + BuildClientFromKubeconfig(kubeconfigPath string) (kubernetes.Client, error) } // NewKubeProxyCLIUpgrader builds a new KubeProxyCLIUpgrader. 
@@ -122,7 +122,7 @@ func (u KubeProxyCLIUpgrader) CleanupAfterUpgrade(ctx context.Context, func (u KubeProxyCLIUpgrader) buildClients( managementClusterKubeconfigPath, workloadClusterKubeconfigPath string, -) (managementClusterClient, workloadClusterClient client.Client, err error) { +) (managementClusterClient, workloadClusterClient kubernetes.Client, err error) { u.log.V(4).Info("Building client for management cluster", "kubeconfig", managementClusterKubeconfigPath) if err = u.retrier.Retry(func() error { managementClusterClient, err = u.clientFactory.BuildClientFromKubeconfig(managementClusterKubeconfigPath) @@ -179,8 +179,8 @@ func WithUpdateKubeProxyTiming(retries int, backoff time.Duration) KubeProxyUpgr // old kube-proxy that always uses iptables legacy and the new one that detects the host preference // and is able to work with nft as well. This is idempotent, so it can be called in a loop if transient // errors are a risk. -func (u KubeProxyUpgrader) PrepareForUpgrade(ctx context.Context, log logr.Logger, managementClusterClient, workloadClusterClient client.Client, spec *cluster.Spec) error { - kcp, err := controller.KubeadmControlPlane(ctx, managementClusterClient, spec.Cluster) +func (u KubeProxyUpgrader) PrepareForUpgrade(ctx context.Context, log logr.Logger, managementClusterClient, workloadClusterClient kubernetes.Client, spec *cluster.Spec) error { + kcp, err := getKubeadmControlPlane(ctx, managementClusterClient, spec.Cluster) if err != nil { return errors.Wrap(err, "reading the kubeadm control plane for an upgrade") } @@ -235,7 +235,7 @@ func (u KubeProxyUpgrader) PrepareForUpgrade(ctx context.Context, log logr.Logge // CleanupAfterUpgrade cleanups all the leftover changes made by PrepareForUpgrade. // It's idempotent so it can be call multiple timesm even if PrepareForUpgrade wasn't // called before. 
-func (u KubeProxyUpgrader) CleanupAfterUpgrade(ctx context.Context, log logr.Logger, managementClusterClient, workloadClusterClient client.Client, spec *cluster.Spec) error { +func (u KubeProxyUpgrader) CleanupAfterUpgrade(ctx context.Context, log logr.Logger, managementClusterClient, workloadClusterClient kubernetes.Client, spec *cluster.Spec) error { log.V(4).Info("Deleting iptables legacy kube-proxy", "name", iptablesLegacyKubeProxyDSName) if err := deleteIPTablesLegacyKubeProxy(ctx, workloadClusterClient); err != nil { return err @@ -256,7 +256,7 @@ func (u KubeProxyUpgrader) CleanupAfterUpgrade(ctx context.Context, log logr.Log } // Remove the skip annotation from the kubeadm control plane so it starts reconciling the kube-proxy again - kcp, err := controller.KubeadmControlPlane(ctx, managementClusterClient, spec.Cluster) + kcp, err := getKubeadmControlPlane(ctx, managementClusterClient, spec.Cluster) if err != nil { return errors.Wrap(err, "reading the kubeadm control plane to cleanup the skip annotations") } @@ -313,7 +313,7 @@ func needsKubeProxyPreUpgrade(spec *cluster.Spec, currentKCP *controlplanev1.Kub return specIncludesNewKubeProxy(spec) && !eksdIncludesNewKubeProxy(currentKubeVersion, currentEKSDNumber), nil } -func annotateKCPWithSKipKubeProxy(ctx context.Context, log logr.Logger, c client.Client, kcp *controlplanev1.KubeadmControlPlane) error { +func annotateKCPWithSKipKubeProxy(ctx context.Context, log logr.Logger, c kubernetes.Client, kcp *controlplanev1.KubeadmControlPlane) error { log.V(4).Info("Adding skip annotation to kcp", "kcp", klog.KObj(kcp), "annotation", controlplanev1.SkipKubeProxyAnnotation) clientutil.AddAnnotation(kcp, controlplanev1.SkipKubeProxyAnnotation, "true") if err := c.Update(ctx, kcp); err != nil { @@ -323,7 +323,7 @@ func annotateKCPWithSKipKubeProxy(ctx context.Context, log logr.Logger, c client return nil } -func addIPTablesLegacyLabelToAllNodes(ctx context.Context, log logr.Logger, client client.Client) error { +func 
addIPTablesLegacyLabelToAllNodes(ctx context.Context, log logr.Logger, client kubernetes.Client) error { nodeList := &corev1.NodeList{} if err := client.List(ctx, nodeList); err != nil { return errors.Wrap(err, "listing workload cluster nodes for kube-proxy upgrade") @@ -346,17 +346,26 @@ func addIPTablesLegacyLabelToAllNodes(ctx context.Context, log logr.Logger, clie return nil } -func getKubeProxy(ctx context.Context, c client.Client) (*appsv1.DaemonSet, error) { +func getKubeProxy(ctx context.Context, c kubernetes.Client) (*appsv1.DaemonSet, error) { kubeProxy := &appsv1.DaemonSet{} - kubeProxyKey := client.ObjectKey{Name: kubeProxyDSName, Namespace: kubeProxyDSNamespace} - if err := c.Get(ctx, kubeProxyKey, kubeProxy); err != nil { + if err := c.Get(ctx, kubeProxyDSName, kubeProxyDSNamespace, kubeProxy); err != nil { return nil, errors.Wrap(err, "reading kube-proxy for upgrade") } return kubeProxy, nil } -func addAntiNodeAffinityToKubeProxy(ctx context.Context, client client.Client, kubeProxy *appsv1.DaemonSet) error { +func getKubeadmControlPlane(ctx context.Context, c kubernetes.Client, cluster *anywherev1.Cluster) (*controlplanev1.KubeadmControlPlane, error) { + key := controller.CAPIKubeadmControlPlaneKey(cluster) + + kubeadmControlPlane := &controlplanev1.KubeadmControlPlane{} + if err := c.Get(ctx, key.Name, key.Namespace, kubeadmControlPlane); err != nil { + return nil, err + } + return kubeadmControlPlane, nil +} + +func addAntiNodeAffinityToKubeProxy(ctx context.Context, client kubernetes.Client, kubeProxy *appsv1.DaemonSet) error { kubeProxy.Spec.Template.Spec.Affinity = &corev1.Affinity{ NodeAffinity: &corev1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ @@ -380,11 +389,13 @@ func addAntiNodeAffinityToKubeProxy(ctx context.Context, client client.Client, k return nil } -func deleteAllOriginalKubeProxyPods(ctx context.Context, c client.Client) error { +func deleteAllOriginalKubeProxyPods(ctx context.Context, c 
kubernetes.Client) error { if err := c.DeleteAllOf(ctx, &corev1.Pod{}, - client.InNamespace(kubeProxyDSNamespace), - client.MatchingLabels{ - k8sAppLabel: kubeProxyLabel, + &kubernetes.DeleteAllOfOptions{ + Namespace: kubeProxyDSNamespace, + HasLabels: map[string]string{ + k8sAppLabel: kubeProxyLabel, + }, }, ); err != nil && !apierrors.IsNotFound(err) { return errors.Wrap(err, "deleting kube-proxy pods before upgrade") @@ -393,7 +404,7 @@ func deleteAllOriginalKubeProxyPods(ctx context.Context, c client.Client) error return nil } -func restrictKubeProxyToNewNodes(ctx context.Context, client client.Client, kubeProxy *appsv1.DaemonSet) error { +func restrictKubeProxyToNewNodes(ctx context.Context, client kubernetes.Client, kubeProxy *appsv1.DaemonSet) error { kubeProxy = kubeProxy.DeepCopy() // Add nodeAffinity to kube-proxy so it's not scheduled in new nodes without our label if err := addAntiNodeAffinityToKubeProxy(ctx, client, kubeProxy); err != nil { @@ -440,7 +451,7 @@ func iptablesLegacyKubeProxyFromCurrentDaemonSet(kcp *controlplanev1.KubeadmCont return iptablesLegacyKubeProxy } -func createIPTablesLegacyKubeProxy(ctx context.Context, client client.Client, kcp *controlplanev1.KubeadmControlPlane, originalKubeProxy *appsv1.DaemonSet) error { +func createIPTablesLegacyKubeProxy(ctx context.Context, client kubernetes.Client, kcp *controlplanev1.KubeadmControlPlane, originalKubeProxy *appsv1.DaemonSet) error { iptablesLegacyKubeProxy := iptablesLegacyKubeProxyFromCurrentDaemonSet(kcp, originalKubeProxy) if err := client.Create(ctx, iptablesLegacyKubeProxy); err != nil && !apierrors.IsAlreadyExists(err) { return errors.Wrap(err, "creating secondary kube-proxy DS with iptables-legacy for old nodes") @@ -449,7 +460,7 @@ func createIPTablesLegacyKubeProxy(ctx context.Context, client client.Client, kc return nil } -func deleteIPTablesLegacyKubeProxy(ctx context.Context, client client.Client) error { +func deleteIPTablesLegacyKubeProxy(ctx context.Context, client 
kubernetes.Client) error { iptablesLegacyKubeProxy := &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: iptablesLegacyKubeProxyDSName, @@ -464,7 +475,7 @@ func deleteIPTablesLegacyKubeProxy(ctx context.Context, client client.Client) er return nil } -func updateKubeProxyVersion(ctx context.Context, client client.Client, kubeProxy *appsv1.DaemonSet, image string) error { +func updateKubeProxyVersion(ctx context.Context, client kubernetes.Client, kubeProxy *appsv1.DaemonSet, image string) error { kubeProxy.Spec.Template.Spec.Containers[0].Image = image if err := client.Update(ctx, kubeProxy); err != nil { return errors.Wrap(err, "updating main kube-proxy version before upgrade") @@ -473,7 +484,7 @@ func updateKubeProxyVersion(ctx context.Context, client client.Client, kubeProxy return nil } -func (u KubeProxyUpgrader) ensureUpdateKubeProxyVersion(ctx context.Context, log logr.Logger, client client.Client, spec *cluster.Spec) error { +func (u KubeProxyUpgrader) ensureUpdateKubeProxyVersion(ctx context.Context, log logr.Logger, client kubernetes.Client, spec *cluster.Spec) error { newKubeProxyImage := spec.VersionsBundle.KubeDistro.KubeProxy.URI return retrier.Retry(u.updateKubeProxyRetries, u.updateKubeProxyBackoff, func() error { kubeProxy, err := getKubeProxy(ctx, client) diff --git a/pkg/clustermanager/kube_proxy_test.go b/pkg/clustermanager/kube_proxy_test.go index b1306204aa45b..f1fc710e62b0f 100644 --- a/pkg/clustermanager/kube_proxy_test.go +++ b/pkg/clustermanager/kube_proxy_test.go @@ -20,6 +20,7 @@ import ( "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/internal/test/envtest" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" + "github.com/aws/eks-anywhere/pkg/clients/kubernetes" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/clusterapi" "github.com/aws/eks-anywhere/pkg/clustermanager" @@ -30,17 +31,23 @@ import ( ) type prepareKubeProxyTest struct { - ctx context.Context - log logr.Logger - 
spec *cluster.Spec - kcp *controlplanev1.KubeadmControlPlane - kubeProxy *appsv1.DaemonSet - nodeCP *corev1.Node - nodeWorker *corev1.Node - kubeProxyCP *corev1.Pod - kubeProxyWorker *corev1.Pod - managementClient client.Client - workloadClient client.Client + ctx context.Context + log logr.Logger + spec *cluster.Spec + kcp *controlplanev1.KubeadmControlPlane + kubeProxy *appsv1.DaemonSet + nodeCP *corev1.Node + nodeWorker *corev1.Node + kubeProxyCP *corev1.Pod + kubeProxyWorker *corev1.Pod + managementClient kubernetes.Client + // managementImplClient is a controller-runtime client that serves as the + // underlying implementation for managementClient. + managementImplClient client.Client + workloadClient kubernetes.Client + // workloadImplClient is a controller-runtime client that serves as the + // underlying implementation for workloadClient. + workloadImplClient client.Client workloadClusterExtraObjects []client.Object } @@ -117,7 +124,8 @@ func newPrepareKubeProxyTest() *prepareKubeProxyTest { } func (tt *prepareKubeProxyTest) initClients(tb testing.TB) { - tt.managementClient = fake.NewClientBuilder().WithObjects(tt.kcp).Build() + tt.managementImplClient = fake.NewClientBuilder().WithObjects(tt.kcp).Build() + tt.managementClient = clientutil.NewKubeClient(tt.managementImplClient) objs := []client.Object{ tt.kubeProxy, @@ -128,7 +136,8 @@ func (tt *prepareKubeProxyTest) initClients(tb testing.TB) { } objs = append(objs, tt.workloadClusterExtraObjects...) - tt.workloadClient = fake.NewClientBuilder().WithObjects(objs...).Build() + tt.workloadImplClient = fake.NewClientBuilder().WithObjects(objs...).Build() + tt.workloadClient = clientutil.NewKubeClient(tt.workloadImplClient) } // startKCPControllerEmulator stars a routine that reverts the kube-proxy @@ -137,7 +146,7 @@ func (tt *prepareKubeProxyTest) initClients(tb testing.TB) { // keeps reverting the kube-proxy image tag. 
func (tt *prepareKubeProxyTest) startKCPControllerEmulator(tb testing.TB, times int) { go func() { - api := envtest.NewAPIExpecter(tb, tt.workloadClient) + api := envtest.NewAPIExpecter(tb, tt.workloadImplClient) kubeProxy := tt.kubeProxy.DeepCopy() originalImage := kubeProxy.Spec.Template.Spec.Containers[0].Image for i := 0; i < times; i++ { @@ -167,12 +176,12 @@ func TestKubeProxyUpgraderPrepareForUpgradeSuccess(t *testing.T) { u.PrepareForUpgrade(tt.ctx, tt.log, tt.managementClient, tt.workloadClient, tt.spec), ).To(Succeed()) - managementAPI := envtest.NewAPIExpecter(t, tt.managementClient) + managementAPI := envtest.NewAPIExpecter(t, tt.managementImplClient) managementAPI.ShouldEventuallyMatch(tt.ctx, tt.kcp, func(g Gomega) { g.Expect(tt.kcp.Annotations).To(HaveKeyWithValue(controlplanev1.SkipKubeProxyAnnotation, "true")) }) - workloadAPI := envtest.NewAPIExpecter(t, tt.workloadClient) + workloadAPI := envtest.NewAPIExpecter(t, tt.workloadImplClient) workloadAPI.ShouldEventuallyMatch(tt.ctx, tt.kubeProxy, func(g Gomega) { image := tt.kubeProxy.Spec.Template.Spec.Containers[0].Image g.Expect(image).To(Equal("public.ecr.aws/eks-distro/kubernetes/kube-proxy:v1.23.16-eks-1-23-18")) @@ -257,12 +266,12 @@ func TestKubeProxyUpgraderPrepareForUpgradeAlreadyUsingNewKubeProxy(t *testing.T u.PrepareForUpgrade(tt.ctx, tt.log, tt.managementClient, tt.workloadClient, tt.spec), ).To(Succeed()) - managementAPI := envtest.NewAPIExpecter(t, tt.managementClient) + managementAPI := envtest.NewAPIExpecter(t, tt.managementImplClient) managementAPI.ShouldEventuallyMatch(tt.ctx, tt.kcp, func(g Gomega) { g.Expect(tt.kcp.Annotations).NotTo(HaveKeyWithValue(controlplanev1.SkipKubeProxyAnnotation, "true")) }) - workloadAPI := envtest.NewAPIExpecter(t, tt.workloadClient) + workloadAPI := envtest.NewAPIExpecter(t, tt.workloadImplClient) workloadAPI.ShouldEventuallyMatch(tt.ctx, tt.kubeProxy, func(g Gomega) { image := tt.kubeProxy.Spec.Template.Spec.Containers[0].Image 
g.Expect(image).To(Equal("public.ecr.aws/eks-distro/kubernetes/kube-proxy:v1.23.16-eks-1-23-18")) @@ -301,12 +310,12 @@ func TestKubeProxyUpgraderPrepareForUpgradeNewSpecHasOldKubeProxy(t *testing.T) u.PrepareForUpgrade(tt.ctx, tt.log, tt.managementClient, tt.workloadClient, tt.spec), ).To(Succeed()) - managementAPI := envtest.NewAPIExpecter(t, tt.managementClient) + managementAPI := envtest.NewAPIExpecter(t, tt.managementImplClient) managementAPI.ShouldEventuallyMatch(tt.ctx, tt.kcp, func(g Gomega) { g.Expect(tt.kcp.Annotations).NotTo(HaveKeyWithValue(controlplanev1.SkipKubeProxyAnnotation, "true")) }) - workloadAPI := envtest.NewAPIExpecter(t, tt.workloadClient) + workloadAPI := envtest.NewAPIExpecter(t, tt.workloadImplClient) workloadAPI.ShouldEventuallyMatch(tt.ctx, tt.kubeProxy, func(g Gomega) { image := tt.kubeProxy.Spec.Template.Spec.Containers[0].Image g.Expect(image).To(Equal("public.ecr.aws/eks-distro/kubernetes/kube-proxy:v1.23.16-eks-1-23-15")) @@ -348,12 +357,12 @@ func TestKubeProxyUpgraderCleanupAfterUpgradeSuccessWithReentry(t *testing.T) { u.CleanupAfterUpgrade(tt.ctx, tt.log, tt.managementClient, tt.workloadClient, tt.spec), ).To(Succeed()) - managementAPI := envtest.NewAPIExpecter(t, tt.managementClient) + managementAPI := envtest.NewAPIExpecter(t, tt.managementImplClient) managementAPI.ShouldEventuallyMatch(tt.ctx, tt.kcp, func(g Gomega) { g.Expect(tt.kcp.Annotations).NotTo(HaveKeyWithValue(controlplanev1.SkipKubeProxyAnnotation, "true")) }) - workloadAPI := envtest.NewAPIExpecter(t, tt.workloadClient) + workloadAPI := envtest.NewAPIExpecter(t, tt.workloadImplClient) workloadAPI.ShouldEventuallyMatch(tt.ctx, tt.kubeProxy, func(g Gomega) { g.Expect(tt.kubeProxy.Spec.Template.Spec.Affinity).To(BeNil()) }) diff --git a/pkg/clustermanager/mocks/kube_proxy.go b/pkg/clustermanager/mocks/kube_proxy.go index 59f13c57ca441..98a66de3c7879 100644 --- a/pkg/clustermanager/mocks/kube_proxy.go +++ b/pkg/clustermanager/mocks/kube_proxy.go @@ -7,8 +7,8 @@ 
package mocks import ( reflect "reflect" + kubernetes "github.com/aws/eks-anywhere/pkg/clients/kubernetes" gomock "github.com/golang/mock/gomock" - client "sigs.k8s.io/controller-runtime/pkg/client" ) // MockClientFactory is a mock of ClientFactory interface. @@ -35,10 +35,10 @@ func (m *MockClientFactory) EXPECT() *MockClientFactoryMockRecorder { } // BuildClientFromKubeconfig mocks base method. -func (m *MockClientFactory) BuildClientFromKubeconfig(kubeconfigPath string) (client.Client, error) { +func (m *MockClientFactory) BuildClientFromKubeconfig(kubeconfigPath string) (kubernetes.Client, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BuildClientFromKubeconfig", kubeconfigPath) - ret0, _ := ret[0].(client.Client) + ret0, _ := ret[0].(kubernetes.Client) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/pkg/controller/clientutil/kubernetes.go b/pkg/controller/clientutil/kubernetes.go index 6c56635890485..2d4412cdae5a1 100644 --- a/pkg/controller/clientutil/kubernetes.go +++ b/pkg/controller/clientutil/kubernetes.go @@ -3,6 +3,7 @@ package clientutil import ( "context" + "k8s.io/apimachinery/pkg/labels" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/aws/eks-anywhere/pkg/clients/kubernetes" @@ -24,3 +25,38 @@ func NewKubeClient(client client.Client) *KubeClient { func (c *KubeClient) Get(ctx context.Context, name, namespace string, obj kubernetes.Object) error { return c.client.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, obj) } + +// List retrieves list of objects. On a successful call, Items field +// in the list will be populated with the result returned from the server. +func (c *KubeClient) List(ctx context.Context, list kubernetes.ObjectList) error { + return c.client.List(ctx, list) +} + +// Create saves the object obj in the Kubernetes cluster. 
+func (c *KubeClient) Create(ctx context.Context, obj kubernetes.Object) error { + return c.client.Create(ctx, obj) +} + +// Update updates the given obj in the Kubernetes cluster. +func (c *KubeClient) Update(ctx context.Context, obj kubernetes.Object) error { + return c.client.Update(ctx, obj) +} + +// Delete deletes the given obj from Kubernetes cluster. +func (c *KubeClient) Delete(ctx context.Context, obj kubernetes.Object) error { + return c.client.Delete(ctx, obj) +} + +// DeleteAllOf deletes all objects of the given type matching the given options. +func (c *KubeClient) DeleteAllOf(ctx context.Context, obj kubernetes.Object, opts ...kubernetes.DeleteAllOfOption) error { + o := &kubernetes.DeleteAllOfOptions{} + for _, opt := range opts { + opt.ApplyToDeleteAllOf(o) + } + + clientOptions := &client.DeleteAllOfOptions{} + clientOptions.LabelSelector = labels.SelectorFromValidatedSet(o.HasLabels) + clientOptions.Namespace = o.Namespace + + return c.client.DeleteAllOf(ctx, obj, clientOptions) +} diff --git a/pkg/controller/clientutil/kubernetes_test.go b/pkg/controller/clientutil/kubernetes_test.go index 777fbf22062f1..0cace11cff351 100644 --- a/pkg/controller/clientutil/kubernetes_test.go +++ b/pkg/controller/clientutil/kubernetes_test.go @@ -8,8 +8,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "github.com/aws/eks-anywhere/internal/test/envtest" _ "github.com/aws/eks-anywhere/internal/test/envtest" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" + "github.com/aws/eks-anywhere/pkg/clients/kubernetes" "github.com/aws/eks-anywhere/pkg/controller/clientutil" ) @@ -45,3 +47,147 @@ func TestKubeClientGetNotFound(t *testing.T) { receiveCluster := &anywherev1.Cluster{} g.Expect(client.Get(ctx, "my-cluster", "default", receiveCluster)).Error() } + +func TestKubeClientList(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + cluster1 := &anywherev1.Cluster{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + TypeMeta: metav1.TypeMeta{ + Kind: anywherev1.ClusterKind, + APIVersion: anywherev1.GroupVersion.String(), + }, + } + cluster2 := &anywherev1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster-2", + Namespace: "default", + }, + TypeMeta: metav1.TypeMeta{ + Kind: anywherev1.ClusterKind, + APIVersion: anywherev1.GroupVersion.String(), + }, + } + cb := fake.NewClientBuilder() + cl := cb.WithRuntimeObjects(cluster1, cluster2).Build() + + client := clientutil.NewKubeClient(cl) + receiveClusters := &anywherev1.ClusterList{} + g.Expect(client.List(ctx, receiveClusters)).To(Succeed()) + g.Expect(receiveClusters.Items).To(ConsistOf(*cluster1, *cluster2)) +} + +func TestKubeClientCreate(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + cluster := &anywherev1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + TypeMeta: metav1.TypeMeta{ + Kind: anywherev1.ClusterKind, + APIVersion: anywherev1.GroupVersion.String(), + }, + } + cb := fake.NewClientBuilder() + cl := cb.WithRuntimeObjects().Build() + + client := clientutil.NewKubeClient(cl) + g.Expect(client.Create(ctx, cluster)).To(Succeed()) + + api := envtest.NewAPIExpecter(t, cl) + api.ShouldEventuallyExist(ctx, cluster) +} + +func TestKubeClientUpdate(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + cluster := &anywherev1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + TypeMeta: metav1.TypeMeta{ + Kind: anywherev1.ClusterKind, + APIVersion: anywherev1.GroupVersion.String(), + }, + } + cb := fake.NewClientBuilder() + cl := cb.WithRuntimeObjects(cluster).Build() + + client := clientutil.NewKubeClient(cl) + + updatedCluster := cluster.DeepCopy() + updatedCluster.Spec.KubernetesVersion = anywherev1.Kube126 + g.Expect(client.Update(ctx, updatedCluster)).To(Succeed()) + + api := envtest.NewAPIExpecter(t, cl) + 
api.ShouldEventuallyMatch(ctx, cluster, func(g Gomega) { + g.Expect(cluster).To(BeComparableTo(updatedCluster)) + }) +} + +func TestKubeClientDelete(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + cluster := &anywherev1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + TypeMeta: metav1.TypeMeta{ + Kind: anywherev1.ClusterKind, + APIVersion: anywherev1.GroupVersion.String(), + }, + } + cb := fake.NewClientBuilder() + cl := cb.WithRuntimeObjects(cluster).Build() + + client := clientutil.NewKubeClient(cl) + + g.Expect(client.Delete(ctx, cluster)).To(Succeed()) + + api := envtest.NewAPIExpecter(t, cl) + api.ShouldEventuallyNotExist(ctx, cluster) +} + +func TestKubeClientDeleteAllOf(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + cluster1 := &anywherev1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + TypeMeta: metav1.TypeMeta{ + Kind: anywherev1.ClusterKind, + APIVersion: anywherev1.GroupVersion.String(), + }, + } + cluster2 := &anywherev1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster-2", + Namespace: "default", + }, + TypeMeta: metav1.TypeMeta{ + Kind: anywherev1.ClusterKind, + APIVersion: anywherev1.GroupVersion.String(), + }, + } + cb := fake.NewClientBuilder() + cl := cb.WithRuntimeObjects(cluster1, cluster2).Build() + + client := clientutil.NewKubeClient(cl) + opts := &kubernetes.DeleteAllOfOptions{ + Namespace: "default", + } + g.Expect(client.DeleteAllOf(ctx, &anywherev1.Cluster{}, opts)).To(Succeed()) + + api := envtest.NewAPIExpecter(t, cl) + api.ShouldEventuallyNotExist(ctx, cluster1) + api.ShouldEventuallyNotExist(ctx, cluster2) +} diff --git a/pkg/controller/clusterapi.go b/pkg/controller/clusterapi.go index 4eb60ee5f1ca8..f49f8d61e03e1 100644 --- a/pkg/controller/clusterapi.go +++ b/pkg/controller/clusterapi.go @@ -59,17 +59,22 @@ func GetKubeadmControlPlane(ctx context.Context, client client.Client, cluster * // 
KubeadmControlPlane reads a cluster-api KubeadmControlPlane for an eks-a cluster using a kube client. func KubeadmControlPlane(ctx context.Context, client client.Client, cluster *anywherev1.Cluster) (*controlplanev1.KubeadmControlPlane, error) { - kubeadmControlPlaneName := clusterapi.KubeadmControlPlaneName(cluster) - kubeadmControlPlane := &controlplanev1.KubeadmControlPlane{} - key := types.NamespacedName{Namespace: constants.EksaSystemNamespace, Name: kubeadmControlPlaneName} - - if err := client.Get(ctx, key, kubeadmControlPlane); err != nil { + if err := client.Get(ctx, CAPIKubeadmControlPlaneKey(cluster), kubeadmControlPlane); err != nil { return nil, err } return kubeadmControlPlane, nil } +// CAPIKubeadmControlPlaneKey generates an ObjectKey for the CAPI Kubeadm control plane owned by +// the provided eks-a cluster. +func CAPIKubeadmControlPlaneKey(cluster *anywherev1.Cluster) client.ObjectKey { + return client.ObjectKey{ + Name: clusterapi.KubeadmControlPlaneName(cluster), + Namespace: constants.EksaSystemNamespace, + } +} + // GetMachineDeployment reads a cluster-api MachineDeployment for an eks-a cluster using a kube client. // If the MachineDeployment is not found, the method returns (nil, nil). func GetMachineDeployment(ctx context.Context, client client.Client, machineDeploymentName string) (*clusterv1.MachineDeployment, error) { diff --git a/pkg/dependencies/factory.go b/pkg/dependencies/factory.go index 1baf35c71ddc5..c828756056b36 100644 --- a/pkg/dependencies/factory.go +++ b/pkg/dependencies/factory.go @@ -1023,7 +1023,7 @@ func (f *Factory) WithEksdUpgrader() *Factory { // WithKubeProxyCLIUpgrader builds a KubeProxyCLIUpgrader. 
func (f *Factory) WithKubeProxyCLIUpgrader() *Factory { - f.WithLogger() + f.WithLogger().WithUnAuthKubeClient() f.buildSteps = append(f.buildSteps, func(ctx context.Context) error { var opts []clustermanager.KubeProxyCLIUpgraderOpt @@ -1033,7 +1033,7 @@ func (f *Factory) WithKubeProxyCLIUpgrader() *Factory { f.dependencies.KubeProxyCLIUpgrader = clustermanager.NewKubeProxyCLIUpgrader( f.dependencies.Logger, - kubernetes.ClientFactory{}, + f.dependencies.UnAuthKubeClient, opts..., ) return nil diff --git a/pkg/dependencies/factory_test.go b/pkg/dependencies/factory_test.go index 041958c71751b..d7f7f4949f174 100644 --- a/pkg/dependencies/factory_test.go +++ b/pkg/dependencies/factory_test.go @@ -487,6 +487,7 @@ func TestFactoryBuildWithCNIInstallerKindnetd(t *testing.T) { func TestFactoryBuildWithKubeProxyCLIUpgraderNoTimeout(t *testing.T) { tt := newTest(t, vsphere) deps, err := dependencies.NewFactory(). + WithLocalExecutables(). WithNoTimeouts(). WithKubeProxyCLIUpgrader(). Build(context.Background()) diff --git a/pkg/executables/kubectl.go b/pkg/executables/kubectl.go index f2df15501b604..3506c508394e2 100644 --- a/pkg/executables/kubectl.go +++ b/pkg/executables/kubectl.go @@ -5,7 +5,6 @@ import ( "bytes" "context" "encoding/json" - "errors" "fmt" "math" "regexp" @@ -16,6 +15,7 @@ import ( eksdv1alpha1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" + "github.com/pkg/errors" rufiov1alpha1 "github.com/tinkerbell/rufio/api/v1alpha1" tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1" appsv1 "k8s.io/api/apps/v1" @@ -32,6 +32,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" addons "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1" @@ -43,6 +44,7 @@ import ( 
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/rufiounreleased" "github.com/aws/eks-anywhere/pkg/retrier" "github.com/aws/eks-anywhere/pkg/types" + "github.com/aws/eks-anywhere/pkg/utils/ptr" releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1" ) @@ -2007,7 +2009,7 @@ func (k *Kubectl) KubeconfigSecretAvailable(ctx context.Context, kubeconfig stri // HasResource implements KubectlRunner. func (k *Kubectl) HasResource(ctx context.Context, resourceType string, name string, kubeconfig string, namespace string) (bool, error) { throwaway := &unstructured.Unstructured{} - err := k.get(ctx, resourceType, kubeconfig, throwaway, withGetResourceName(name), withGetNamespace(namespace)) + err := k.Get(ctx, resourceType, kubeconfig, throwaway, withGetResourceName(name), withNamespaceOrDefaultForGet(namespace)) if err != nil { return false, err } @@ -2018,64 +2020,65 @@ func (k *Kubectl) HasResource(ctx context.Context, resourceType string, name str // and unmarshalls the response into the provided Object // If the object is not found, it returns an error implementing apimachinery errors.APIStatus. func (k *Kubectl) GetObject(ctx context.Context, resourceType, name, namespace, kubeconfig string, obj runtime.Object) error { - return k.get(ctx, resourceType, kubeconfig, obj, withGetResourceName(name), withGetNamespace(namespace)) + return k.Get(ctx, resourceType, kubeconfig, obj, withGetResourceName(name), withNamespaceOrDefaultForGet(namespace)) } // GetClusterObject performs a GET class like above except without namespace required. 
func (k *Kubectl) GetClusterObject(ctx context.Context, resourceType, name, kubeconfig string, obj runtime.Object) error { - return k.get(ctx, resourceType, kubeconfig, obj, withGetResourceName(name)) + return k.Get(ctx, resourceType, kubeconfig, obj, withGetResourceName(name), withClusterScope()) } func (k *Kubectl) ListObjects(ctx context.Context, resourceType, namespace, kubeconfig string, list kubernetes.ObjectList) error { - return k.get(ctx, resourceType, kubeconfig, list, withGetNamespace(namespace)) + return k.Get(ctx, resourceType, kubeconfig, list, withNamespaceOrDefaultForGet(namespace)) } -type ( - getOption func(*getOptions) - getOptions struct { - name string - namespace string +func withGetResourceName(name string) kubernetes.KubectlGetOption { + return &kubernetes.KubectlGetOptions{ + Name: name, } -) +} -func withGetResourceName(name string) getOption { - return func(o *getOptions) { - o.name = name +// withNamespaceOrDefaultForGet returns an option for a get command to use the provided namespace +// or the default namespace if an empty string is provided. +// For backwards compatibility, we use the default namespace if this method is called explicitly +// with an empty namespace since some parts of the code rely on kubectl using the default namespace +// when no namespace argument is passed. +func withNamespaceOrDefaultForGet(namespace string) kubernetes.KubectlGetOption { + if namespace == "" { + namespace = "default" + } + return &kubernetes.KubectlGetOptions{ + Namespace: namespace, } } -func withGetNamespace(namespace string) getOption { - return func(o *getOptions) { - o.namespace = namespace +func withClusterScope() kubernetes.KubectlGetOption { + return &kubernetes.KubectlGetOptions{ + ClusterScoped: ptr.Bool(true), + } } -func (k *Kubectl) get(ctx context.Context, resourceType, kubeconfig string, obj runtime.Object, opts ...getOption) error { - o := &getOptions{} +// Get performs a kubectl get command. 
+func (k *Kubectl) Get(ctx context.Context, resourceType, kubeconfig string, obj runtime.Object, opts ...kubernetes.KubectlGetOption) error { + o := &kubernetes.KubectlGetOptions{} for _, opt := range opts { - opt(o) + opt.ApplyToGet(o) } - params := []string{"get", "--ignore-not-found", "-o", "json", "--kubeconfig", kubeconfig, resourceType} - if o.namespace != "" { - params = append(params, "--namespace", o.namespace) - } - if o.name != "" { - params = append(params, o.name) + clusterScoped := o.ClusterScoped != nil && *o.ClusterScoped + + if o.Name != "" && o.Namespace == "" && !clusterScoped { + return errors.New("if Name is specified, Namespace is required") } + params := getParams(resourceType, kubeconfig, o) stdOut, err := k.Execute(ctx, params...) if err != nil { return fmt.Errorf("getting %s with kubectl: %v", resourceType, err) } if stdOut.Len() == 0 { - resourceTypeSplit := strings.SplitN(resourceType, ".", 2) - gr := schema.GroupResource{Resource: resourceTypeSplit[0]} - if len(resourceTypeSplit) == 2 { - gr.Group = resourceTypeSplit[1] - } - return apierrors.NewNotFound(gr, o.name) + return newNotFoundErrorForTypeAndName(resourceType, o.Name) } if err = json.Unmarshal(stdOut.Bytes(), obj); err != nil { @@ -2085,6 +2088,163 @@ func (k *Kubectl) get(ctx context.Context, resourceType, kubeconfig string, obj return nil } +func getParams(resourceType, kubeconfig string, o *kubernetes.KubectlGetOptions) []string { + clusterScoped := o.ClusterScoped != nil && *o.ClusterScoped + + params := []string{"get", "--ignore-not-found", "-o", "json", "--kubeconfig", kubeconfig, resourceType} + if o.Namespace != "" { + params = append(params, "--namespace", o.Namespace) + } else if !clusterScoped { + params = append(params, "--all-namespaces") + } + + if o.Name != "" { + params = append(params, o.Name) + } + + return params +} + +// Create performs a kubectl create command. 
+func (k *Kubectl) Create(ctx context.Context, kubeconfig string, obj runtime.Object) error { + b, err := yaml.Marshal(obj) + if err != nil { + return errors.Wrap(err, "marshalling object") + } + _, err = k.ExecuteWithStdin(ctx, b, "create", "-f", "-", "--kubeconfig", kubeconfig) + if isKubectlAlreadyExistsError(err) { + return newAlreadyExistsErrorForObj(obj) + } + + if err != nil { + return errors.Wrapf(err, "creating %s object with kubectl", obj.GetObjectKind().GroupVersionKind()) + } + return nil +} + +const alreadyExistsErrorMessageSubString = "AlreadyExists" + +func isKubectlAlreadyExistsError(err error) bool { + return err != nil && strings.Contains(err.Error(), alreadyExistsErrorMessageSubString) +} + +const notFoundErrorMessageSubString = "NotFound" + +func isKubectlNotFoundError(err error) bool { + return err != nil && strings.Contains(err.Error(), notFoundErrorMessageSubString) +} + +func newAlreadyExistsErrorForObj(obj runtime.Object) error { + return apierrors.NewAlreadyExists( + groupResourceFromObj(obj), + resourceNameFromObj(obj), + ) +} + +func groupResourceFromObj(obj runtime.Object) schema.GroupResource { + apiObj, ok := obj.(client.Object) + if !ok { + // If this doesn't implement the client object interface, + // we don't know how to process it. This should never happen for + // any of the known types. + return schema.GroupResource{} + } + + k := apiObj.GetObjectKind().GroupVersionKind() + return schema.GroupResource{ + Group: k.Group, + Resource: k.Kind, + } +} + +func resourceNameFromObj(obj runtime.Object) string { + apiObj, ok := obj.(client.Object) + if !ok { + // If this doesn't implement the client object interface, + // we don't know how to process it. This should never happen for + // any of the known types. 
+ return "" + } + + return apiObj.GetName() +} + +func newNotFoundErrorForTypeAndName(resourceType, name string) error { + resourceTypeSplit := strings.SplitN(resourceType, ".", 2) + gr := schema.GroupResource{Resource: resourceTypeSplit[0]} + if len(resourceTypeSplit) == 2 { + gr.Group = resourceTypeSplit[1] + } + return apierrors.NewNotFound(gr, name) +} + +// Replace performs a kubectl replace command. +func (k *Kubectl) Replace(ctx context.Context, kubeconfig string, obj runtime.Object) error { + b, err := yaml.Marshal(obj) + if err != nil { + return errors.Wrap(err, "marshalling object") + } + if _, err := k.ExecuteWithStdin(ctx, b, "replace", "-f", "-", "--kubeconfig", kubeconfig); err != nil { + return errors.Wrapf(err, "replacing %s object with kubectl", obj.GetObjectKind().GroupVersionKind()) + } + return nil +} + +// Delete performs a delete command authenticating with a kubeconfig file. +func (k *Kubectl) Delete(ctx context.Context, resourceType, kubeconfig string, opts ...kubernetes.KubectlDeleteOption) error { + o := &kubernetes.KubectlDeleteOptions{} + for _, opt := range opts { + opt.ApplyToDelete(o) + } + + if o.Name != "" && o.Namespace == "" { + return errors.New("if Name is specified, Namespace is required") + } + + if o.Name != "" && o.HasLabels != nil { + return errors.New("options for HasLabels and Name are mutually exclusive") + } + + params := deleteParams(resourceType, kubeconfig, o) + _, err := k.Execute(ctx, params...) 
+ if isKubectlNotFoundError(err) { + return newNotFoundErrorForTypeAndName(resourceType, o.Name) + } + + if err != nil { + return errors.Wrapf(err, "deleting %s", resourceType) + } + return nil +} + +func deleteParams(resourceType, kubeconfig string, o *kubernetes.KubectlDeleteOptions) []string { + params := []string{"delete", "--kubeconfig", kubeconfig, resourceType} + if o.Name != "" { + params = append(params, o.Name) + } else if o.HasLabels == nil { + params = append(params, "--all") + } + + if o.Namespace != "" { + params = append(params, "--namespace", o.Namespace) + } else { + params = append(params, "--all-namespaces") + } + + if len(o.HasLabels) > 0 { + labelConstrains := make([]string, 0, len(o.HasLabels)) + for l, v := range o.HasLabels { + labelConstrains = append(labelConstrains, l+"="+v) + } + + sort.Strings(labelConstrains) + + params = append(params, "--selector", strings.Join(labelConstrains, ",")) + } + + return params +} + func (k *Kubectl) Apply(ctx context.Context, kubeconfig string, obj runtime.Object) error { b, err := yaml.Marshal(obj) if err != nil { @@ -2136,14 +2296,6 @@ func (k *Kubectl) ExecuteCommand(ctx context.Context, opts ...string) (bytes.Buf return k.Execute(ctx, opts...) } -// Delete performs a DELETE call to the kube API server authenticating with a kubeconfig file. -func (k *Kubectl) Delete(ctx context.Context, resourceType, name, namespace, kubeconfig string) error { - if _, err := k.Execute(ctx, "delete", resourceType, name, "--namespace", namespace, "--kubeconfig", kubeconfig); err != nil { - return fmt.Errorf("deleting %s %s in namespace %s: %v", name, resourceType, namespace, err) - } - return nil -} - // DeleteClusterObject performs a DELETE call like above except without namespace required. 
func (k *Kubectl) DeleteClusterObject(ctx context.Context, resourceType, name, kubeconfig string) error { if _, err := k.Execute(ctx, "delete", resourceType, name, "--kubeconfig", kubeconfig); err != nil { diff --git a/pkg/executables/kubectl_getter_helper_test.go b/pkg/executables/kubectl_getter_helper_test.go index a4fdf05bc664b..e9b9e811f4443 100644 --- a/pkg/executables/kubectl_getter_helper_test.go +++ b/pkg/executables/kubectl_getter_helper_test.go @@ -26,6 +26,7 @@ func newKubectlGetterTest(t *testing.T) *kubectlGetterTest { return &kubectlGetterTest{ kubectlTest: newKubectlTest(t), name: "name", + namespace: "my-ns", } } diff --git a/pkg/executables/kubectl_test.go b/pkg/executables/kubectl_test.go index 55f5c8359982a..e7a931601695e 100644 --- a/pkg/executables/kubectl_test.go +++ b/pkg/executables/kubectl_test.go @@ -29,6 +29,7 @@ import ( packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" + "github.com/aws/eks-anywhere/pkg/clients/kubernetes" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/executables" mockexecutables "github.com/aws/eks-anywhere/pkg/executables/mocks" @@ -2433,7 +2434,7 @@ func TestKubectlGetStorageClassSuccess(t *testing.T) { func TestKubectlGetStorageClassError(t *testing.T) { t.Parallel() - newKubectlGetterTest(t).withResourceType( + newKubectlGetterTest(t).withoutNamespace().withResourceType( "storageclass", ).withGetter(func(tt *kubectlGetterTest) (client.Object, error) { return tt.k.GetStorageClass(tt.ctx, tt.name, tt.kubeconfig) @@ -2490,6 +2491,148 @@ func TestKubectlGetObjectNotFound(t *testing.T) { } } +func TestKubectlGetObjectWithEMptyNamespace(t *testing.T) { + tt := newKubectlTest(t) + name := "my-cluster" + emptyNamespace := "" + tt.e.EXPECT().Execute( + tt.ctx, + // Here we expect the command to have the default namespace set explicitly + "get", "--ignore-not-found", "-o", "json", 
"--kubeconfig", tt.kubeconfig, "cluster", "--namespace", "default", name, + ).Return(bytes.Buffer{}, nil) + + err := tt.k.GetObject(tt.ctx, "cluster", name, emptyNamespace, tt.kubeconfig, &clusterv1.Cluster{}) + tt.Expect(err).To(HaveOccurred()) + tt.Expect(apierrors.IsNotFound(err)).To(BeTrue()) +} + +func TestKubectlGetAllObjects(t *testing.T) { + t.Parallel() + tt := newKubectlTest(t) + list := &v1alpha1.ClusterList{} + b, err := json.Marshal(list) + tt.Expect(err).To(Succeed()) + tt.e.EXPECT().Execute( + tt.ctx, + "get", "--ignore-not-found", "-o", "json", "--kubeconfig", tt.kubeconfig, "clusters", "--all-namespaces", + ).Return(*bytes.NewBuffer(b), nil) + + got := &v1alpha1.ClusterList{} + tt.Expect(tt.k.Get(tt.ctx, "clusters", tt.kubeconfig, got)).To(Succeed()) + tt.Expect(got).To(BeComparableTo(list)) +} + +func TestKubectlGetAllObjectsInNamespace(t *testing.T) { + t.Parallel() + tt := newKubectlTest(t) + list := &v1alpha1.ClusterList{} + b, err := json.Marshal(list) + tt.Expect(err).To(Succeed()) + tt.e.EXPECT().Execute( + tt.ctx, + "get", "--ignore-not-found", "-o", "json", "--kubeconfig", tt.kubeconfig, "clusters", "--namespace", tt.namespace, + ).Return(*bytes.NewBuffer(b), nil) + + opts := &kubernetes.KubectlGetOptions{ + Namespace: tt.namespace, + } + got := &v1alpha1.ClusterList{} + tt.Expect(tt.k.Get(tt.ctx, "clusters", tt.kubeconfig, got, opts)).To(Succeed()) + tt.Expect(got).To(BeComparableTo(list)) +} + +func TestKubectlGetSingleObject(t *testing.T) { + t.Parallel() + tt := newKubectlTest(t) + clusterName := "my-cluster" + cluster := &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + }, + } + b, err := json.Marshal(cluster) + tt.Expect(err).To(Succeed()) + tt.e.EXPECT().Execute( + tt.ctx, + "get", "--ignore-not-found", "-o", "json", "--kubeconfig", tt.kubeconfig, "clusters", "--namespace", tt.namespace, clusterName, + ).Return(*bytes.NewBuffer(b), nil) + + opts := &kubernetes.KubectlGetOptions{ + Namespace: tt.namespace, + 
Name: clusterName, + } + got := &v1alpha1.Cluster{} + tt.Expect(tt.k.Get(tt.ctx, "clusters", tt.kubeconfig, got, opts)).To(Succeed()) + tt.Expect(got).To(BeComparableTo(cluster)) +} + +func TestKubectlGetWithNameAndWithoutNamespace(t *testing.T) { + t.Parallel() + tt := newKubectlTest(t) + clusterName := "my-cluster" + + opts := &kubernetes.KubectlGetOptions{ + Name: clusterName, + } + + tt.Expect(tt.k.Get(tt.ctx, "clusters", tt.kubeconfig, &v1alpha1.Cluster{}, opts)).To( + MatchError(ContainSubstring("if Name is specified, Namespace is required")), + ) +} + +func TestKubectlCreateSuccess(t *testing.T) { + t.Parallel() + tt := newKubectlTest(t) + secret := &corev1.Secret{} + b, err := yaml.Marshal(secret) + tt.Expect(err).To(Succeed()) + + tt.e.EXPECT().ExecuteWithStdin( + tt.ctx, + b, + "create", "-f", "-", "--kubeconfig", tt.kubeconfig, + ).Return(bytes.Buffer{}, nil) + + tt.Expect(tt.k.Create(tt.ctx, tt.kubeconfig, secret)).To(Succeed()) +} + +func TestKubectlCreateAlreadyExistsError(t *testing.T) { + t.Parallel() + tt := newKubectlTest(t) + secret := &corev1.Secret{} + b, err := yaml.Marshal(secret) + tt.Expect(err).To(Succeed()) + + tt.e.EXPECT().ExecuteWithStdin( + tt.ctx, + b, + "create", "-f", "-", "--kubeconfig", tt.kubeconfig, + ).Return( + bytes.Buffer{}, + errors.New("Error from server (AlreadyExists): error when creating \"STDIN\": secret \"my-secret\" already exists\n"), //nolint:revive // The format of the message it's important here since the code checks for its content + ) + + err = tt.k.Create(tt.ctx, tt.kubeconfig, secret) + tt.Expect(err).To(HaveOccurred()) + tt.Expect(apierrors.IsAlreadyExists(err)).To(BeTrue(), "error should be an AlreadyExists apierror") +} + +func TestKubectlReplaceSuccess(t *testing.T) { + t.Parallel() + tt := newKubectlTest(t) + secret := &corev1.Secret{} + b, err := yaml.Marshal(secret) + tt.Expect(err).To(Succeed()) + + tt.e.EXPECT().ExecuteWithStdin( + tt.ctx, + b, + "replace", "-f", "-", "--kubeconfig", tt.kubeconfig, 
+ ).Return(bytes.Buffer{}, nil) + + tt.Expect(tt.k.Replace(tt.ctx, tt.kubeconfig, secret)).To(Succeed()) +} + func TestKubectlGetClusterObjectNotFound(t *testing.T) { t.Parallel() test := newKubectlTest(t) @@ -2740,17 +2883,128 @@ func TestGetUnprovisionedTinkerbellHardware_ExecutableErrors(t *testing.T) { tt.Expect(err).NotTo(BeNil()) } -func TestKubectlDelete(t *testing.T) { +func TestKubectlDeleteSingleObject(t *testing.T) { t.Parallel() tt := newKubectlTest(t) name := "my-cluster" resourceType := "cluster.x-k8s.io" tt.e.EXPECT().Execute( tt.ctx, - "delete", resourceType, name, "--namespace", tt.namespace, "--kubeconfig", tt.kubeconfig, + "delete", "--kubeconfig", tt.kubeconfig, resourceType, name, "--namespace", tt.namespace, + ).Return(bytes.Buffer{}, nil) + + opts := &kubernetes.KubectlDeleteOptions{ + Name: name, + Namespace: tt.namespace, + } + tt.Expect(tt.k.Delete(tt.ctx, resourceType, tt.kubeconfig, opts)).To(Succeed()) +} + +func TestKubectlDeleteAllObjectsInNamespace(t *testing.T) { + t.Parallel() + tt := newKubectlTest(t) + resourceType := "cluster.x-k8s.io" + tt.e.EXPECT().Execute( + tt.ctx, + "delete", "--kubeconfig", tt.kubeconfig, resourceType, "--all", "--namespace", tt.namespace, + ).Return(bytes.Buffer{}, nil) + + opts := &kubernetes.KubectlDeleteOptions{ + Namespace: tt.namespace, + } + tt.Expect(tt.k.Delete(tt.ctx, resourceType, tt.kubeconfig, opts)).To(Succeed()) +} + +func TestKubectlDeleteAllObjectsInNamespaceWithLabels(t *testing.T) { + t.Parallel() + tt := newKubectlTest(t) + resourceType := "cluster.x-k8s.io" + tt.e.EXPECT().Execute( + tt.ctx, + "delete", + "--kubeconfig", tt.kubeconfig, + resourceType, + "--namespace", tt.namespace, + "--selector", "label1=val1,label2=val2", + ).Return(bytes.Buffer{}, nil) + + opts := &kubernetes.KubectlDeleteOptions{ + Namespace: tt.namespace, + HasLabels: map[string]string{ + "label2": "val2", + "label1": "val1", + }, + } + tt.Expect(tt.k.Delete(tt.ctx, resourceType, tt.kubeconfig, 
opts)).To(Succeed()) +} + +func TestKubectlDeleteAllObjectsInAllNamespaces(t *testing.T) { + t.Parallel() + tt := newKubectlTest(t) + resourceType := "cluster.x-k8s.io" + tt.e.EXPECT().Execute( + tt.ctx, + "delete", "--kubeconfig", tt.kubeconfig, resourceType, "--all", "--all-namespaces", ).Return(bytes.Buffer{}, nil) - tt.Expect(tt.k.Delete(tt.ctx, resourceType, name, tt.namespace, tt.kubeconfig)).To(Succeed()) + tt.Expect(tt.k.Delete(tt.ctx, resourceType, tt.kubeconfig)).To(Succeed()) +} + +func TestKubectlDeleteObjectWithNoNamespace(t *testing.T) { + t.Parallel() + tt := newKubectlTest(t) + resourceType := "cluster.x-k8s.io" + clusterName := "my-cluster" + + opts := &kubernetes.KubectlDeleteOptions{ + Name: clusterName, + } + + tt.Expect(tt.k.Delete(tt.ctx, resourceType, tt.kubeconfig, opts)).To( + MatchError(ContainSubstring("if Name is specified, Namespace is required")), + ) +} + +func TestKubectlDeleteObjectWithHasLabels(t *testing.T) { + t.Parallel() + tt := newKubectlTest(t) + resourceType := "cluster.x-k8s.io" + clusterName := "my-cluster" + + opts := &kubernetes.KubectlDeleteOptions{ + Name: clusterName, + Namespace: tt.namespace, + HasLabels: map[string]string{ + "mylabel": "value", + }, + } + + tt.Expect(tt.k.Delete(tt.ctx, resourceType, tt.kubeconfig, opts)).To( + MatchError(ContainSubstring("options for HasLabels and Name are mutually exclusive")), + ) +} + +func TestKubectlDeleteObjectNotFoundError(t *testing.T) { + t.Parallel() + tt := newKubectlTest(t) + name := "my-cluster" + resourceType := "cluster.x-k8s.io" + tt.e.EXPECT().Execute( + tt.ctx, + "delete", "--kubeconfig", tt.kubeconfig, resourceType, name, "--namespace", tt.namespace, + ).Return( + bytes.Buffer{}, + errors.New("Error from server (NotFound): cluster \"my-cluster\" not found\n"), //nolint:revive // The format of the message it's important here since the code checks for its content + ) + + opts := &kubernetes.KubectlDeleteOptions{ + Name: name, + Namespace: tt.namespace, + } + + err 
:= tt.k.Delete(tt.ctx, resourceType, tt.kubeconfig, opts) + tt.Expect(err).To(HaveOccurred()) + tt.Expect(apierrors.IsNotFound(err)).To(BeTrue(), "error should be a NotFound apierror") } func TestKubectlDeleteClusterObject(t *testing.T) { diff --git a/pkg/providers/snow/mocks/client.go b/pkg/providers/snow/mocks/client.go index c1fcae73baa48..5dbabfd460ad1 100644 --- a/pkg/providers/snow/mocks/client.go +++ b/pkg/providers/snow/mocks/client.go @@ -50,20 +50,6 @@ func (mr *MockKubeUnAuthClientMockRecorder) Apply(ctx, kubeconfig, obj interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockKubeUnAuthClient)(nil).Apply), ctx, kubeconfig, obj) } -// Delete mocks base method. -func (m *MockKubeUnAuthClient) Delete(ctx context.Context, name, namespace, kubeconfig string, obj runtime.Object) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", ctx, name, namespace, kubeconfig, obj) - ret0, _ := ret[0].(error) - return ret0 -} - -// Delete indicates an expected call of Delete. -func (mr *MockKubeUnAuthClientMockRecorder) Delete(ctx, name, namespace, kubeconfig, obj interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKubeUnAuthClient)(nil).Delete), ctx, name, namespace, kubeconfig, obj) -} - // KubeconfigClient mocks base method. 
func (m *MockKubeUnAuthClient) KubeconfigClient(kubeconfig string) kubernetes.Client { m.ctrl.T.Helper() diff --git a/pkg/providers/snow/snow.go b/pkg/providers/snow/snow.go index 0d57f374744a7..158926e33c85c 100644 --- a/pkg/providers/snow/snow.go +++ b/pkg/providers/snow/snow.go @@ -49,7 +49,6 @@ type SnowProvider struct { type KubeUnAuthClient interface { KubeconfigClient(kubeconfig string) kubernetes.Client - Delete(ctx context.Context, name, namespace, kubeconfig string, obj runtime.Object) error Apply(ctx context.Context, kubeconfig string, obj runtime.Object) error } @@ -334,12 +333,15 @@ func (p *SnowProvider) UpgradeNeeded(ctx context.Context, newSpec, oldSpec *clus } func (p *SnowProvider) DeleteResources(ctx context.Context, clusterSpec *cluster.Spec) error { + client := p.kubeUnAuthClient.KubeconfigClient(clusterSpec.ManagementCluster.KubeconfigFile) + for _, mc := range clusterSpec.SnowMachineConfigs { - if err := p.kubeUnAuthClient.Delete(ctx, mc.Name, mc.Namespace, clusterSpec.ManagementCluster.KubeconfigFile, mc); err != nil { + if err := client.Delete(ctx, mc); err != nil { return err } } - return p.kubeUnAuthClient.Delete(ctx, clusterSpec.SnowDatacenter.GetName(), clusterSpec.SnowDatacenter.GetNamespace(), clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.SnowDatacenter) + + return client.Delete(ctx, clusterSpec.SnowDatacenter) } func (p *SnowProvider) PostClusterDeleteValidate(_ context.Context, _ *types.Cluster) error { diff --git a/pkg/providers/snow/snow_test.go b/pkg/providers/snow/snow_test.go index 74b6eb240e483..2b2f40ab6c2df 100644 --- a/pkg/providers/snow/snow_test.go +++ b/pkg/providers/snow/snow_test.go @@ -911,25 +911,19 @@ func TestMachineConfigs(t *testing.T) { func TestDeleteResources(t *testing.T) { tt := newSnowTest(t) - tt.kubeUnAuthClient.EXPECT().Delete( - tt.ctx, - tt.clusterSpec.SnowDatacenter.Name, - tt.clusterSpec.SnowDatacenter.Namespace, + tt.kubeUnAuthClient.EXPECT().KubeconfigClient( 
tt.clusterSpec.ManagementCluster.KubeconfigFile, + ).Return(tt.kubeconfigClient) + tt.kubeconfigClient.EXPECT().Delete( + tt.ctx, tt.clusterSpec.SnowDatacenter, ).Return(nil) - tt.kubeUnAuthClient.EXPECT().Delete( + tt.kubeconfigClient.EXPECT().Delete( tt.ctx, - tt.clusterSpec.SnowMachineConfigs["test-cp"].Name, - tt.clusterSpec.SnowMachineConfigs["test-cp"].Namespace, - tt.clusterSpec.ManagementCluster.KubeconfigFile, tt.clusterSpec.SnowMachineConfigs["test-cp"], ).Return(nil) - tt.kubeUnAuthClient.EXPECT().Delete( + tt.kubeconfigClient.EXPECT().Delete( tt.ctx, - tt.clusterSpec.SnowMachineConfigs["test-wn"].Name, - tt.clusterSpec.SnowMachineConfigs["test-wn"].Namespace, - tt.clusterSpec.ManagementCluster.KubeconfigFile, tt.clusterSpec.SnowMachineConfigs["test-wn"], ).Return(nil) diff --git a/test/e2e/metallb.go b/test/e2e/metallb.go index c437a1e1df81e..3ea429f983411 100644 --- a/test/e2e/metallb.go +++ b/test/e2e/metallb.go @@ -16,6 +16,7 @@ import ( "github.com/aws/eks-anywhere/internal/pkg/api" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" + "github.com/aws/eks-anywhere/pkg/clients/kubernetes" "github.com/aws/eks-anywhere/pkg/kubeconfig" "github.com/aws/eks-anywhere/test/framework" ) @@ -186,7 +187,11 @@ spec: } t.Cleanup(func() { - test.KubectlClient.Delete(ctx, "service", "my-service", "default", kubeconfig.FromClusterName(test.ClusterName)) + opts := &kubernetes.KubectlDeleteOptions{ + Name: "my-service", + Namespace: "default", + } + test.KubectlClient.Delete(ctx, "service", kubeconfig.FromClusterName(test.ClusterName), opts) }) test.CreateResource(ctx, ` apiVersion: v1 @@ -332,7 +337,11 @@ spec: } t.Cleanup(func() { - test.KubectlClient.Delete(ctx, "service", "my-service", "default", kubeconfig.FromClusterName(test.ClusterName)) + opts := &kubernetes.KubectlDeleteOptions{ + Name: "my-service", + Namespace: "default", + } + test.KubectlClient.Delete(ctx, "service", kubeconfig.FromClusterName(test.ClusterName), opts) }) test.CreateResource(ctx, ` 
apiVersion: v1 diff --git a/test/framework/vspherecsi.go b/test/framework/vspherecsi.go index e9b197ca0b715..1e9a58a797f05 100644 --- a/test/framework/vspherecsi.go +++ b/test/framework/vspherecsi.go @@ -8,6 +8,7 @@ import ( v1 "k8s.io/api/storage/v1" + "github.com/aws/eks-anywhere/pkg/clients/kubernetes" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/retrier" ) @@ -61,7 +62,11 @@ func (e *ClusterE2ETest) DeleteVSphereCSI() { ctx := context.Background() e.deleteVsphereCSIResources(ctx) csiClusterResourceSetName := fmt.Sprintf("%s-csi", e.ClusterName) - err := e.KubectlClient.Delete(ctx, "clusterresourceset", csiClusterResourceSetName, constants.EksaSystemNamespace, e.Cluster().KubeconfigFile) + opts := &kubernetes.KubectlDeleteOptions{ + Name: csiClusterResourceSetName, + Namespace: constants.EksaSystemNamespace, + } + err := e.KubectlClient.Delete(ctx, "clusterresourceset", e.Cluster().KubeconfigFile, opts) if err != nil { e.T.Fatal(err) } @@ -74,14 +79,24 @@ func (w *WorkloadCluster) DeleteWorkloadVsphereCSI() { } func (e *ClusterE2ETest) deleteVsphereCSIResources(ctx context.Context) { - err := e.KubectlClient.Delete(ctx, "deployment", csiDeployment, kubeSystemNameSpace, e.Cluster().KubeconfigFile) + opts := &kubernetes.KubectlDeleteOptions{ + Name: csiDeployment, + Namespace: kubeSystemNameSpace, + } + err := e.KubectlClient.Delete(ctx, "deployment", e.Cluster().KubeconfigFile, opts) if err != nil { e.T.Fatal(err) } - err = e.KubectlClient.Delete(ctx, "daemonset", csiDaemonSet, kubeSystemNameSpace, e.Cluster().KubeconfigFile) + + opts = &kubernetes.KubectlDeleteOptions{ + Name: csiDaemonSet, + Namespace: kubeSystemNameSpace, + } + err = e.KubectlClient.Delete(ctx, "daemonset", e.Cluster().KubeconfigFile, opts) if err != nil { e.T.Fatal(err) } + err = e.KubectlClient.DeleteClusterObject(ctx, "storageclass", csiStorageClassName, e.Cluster().KubeconfigFile) if err != nil { e.T.Fatal(err)