From 56a0a4886cfdcddd88f1f068a135b1a56889d822 Mon Sep 17 00:00:00 2001 From: David <65228999+david336362@users.noreply.github.com> Date: Fri, 15 Nov 2024 16:35:22 +0200 Subject: [PATCH] Support Kubernetes version 1.20.15 (#1751) Co-authored-by: David --- .github/workflows/e2e.yaml | 3 ++ .../controllers/collectorsgroup_controller.go | 4 ++- .../controllers/datacollection/daemonset.go | 29 +++++++++------- autoscaler/controllers/datacollection/root.go | 9 ++--- .../instrumentedapplication_controller.go | 4 ++- .../controllers/processor_controller.go | 4 ++- autoscaler/main.go | 28 ++++++++++++---- cli/cmd/install.go | 25 +++++++------- cli/cmd/resources/odiglet.go | 18 ++++++---- cli/pkg/autodetect/kind.go | 4 +-- cli/pkg/kube/client.go | 32 ++++++++++++++++-- helm/odigos/Chart.yaml | 2 +- helm/odigos/templates/odiglet/daemonset.yaml | 2 ++ k8sutils/pkg/version/version.go | 33 +++++++++++++++++++ 14 files changed, 148 insertions(+), 49 deletions(-) create mode 100644 k8sutils/pkg/version/version.go diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index eec69175e5..dbaaba21e2 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -65,6 +65,7 @@ jobs: fail-fast: false matrix: kube-version: + - "1.20.15" - "1.23" - "1.30" test-scenario: @@ -74,6 +75,8 @@ jobs: - "cli-upgrade" - "workload-lifecycle" include: + - kube-version: "1.20.15" + kind-image: "kindest/node:v1.20.15@sha256:a32bf55309294120616886b5338f95dd98a2f7231519c7dedcec32ba29699394" - kube-version: "1.23" kind-image: "kindest/node:v1.23.17@sha256:14d0a9a892b943866d7e6be119a06871291c517d279aedb816a4b4bc0ec0a5b3" - kube-version: "1.30" diff --git a/autoscaler/controllers/collectorsgroup_controller.go b/autoscaler/controllers/collectorsgroup_controller.go index eecc5bf14b..51dcb38613 100644 --- a/autoscaler/controllers/collectorsgroup_controller.go +++ b/autoscaler/controllers/collectorsgroup_controller.go @@ -28,6 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/version" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -37,6 +38,7 @@ type CollectorsGroupReconciler struct { Scheme *runtime.Scheme ImagePullSecrets []string OdigosVersion string + K8sVersion *version.Version DisableNameProcessor bool Config *controllerconfig.ControllerConfig } @@ -50,7 +52,7 @@ func (r *CollectorsGroupReconciler) Reconcile(ctx context.Context, req ctrl.Requ return ctrl.Result{}, err } - err = datacollection.Sync(ctx, r.Client, r.Scheme, r.ImagePullSecrets, r.OdigosVersion, r.DisableNameProcessor) + err = datacollection.Sync(ctx, r.Client, r.Scheme, r.ImagePullSecrets, r.OdigosVersion, r.K8sVersion, r.DisableNameProcessor) if err != nil { return ctrl.Result{}, err } diff --git a/autoscaler/controllers/datacollection/daemonset.go b/autoscaler/controllers/datacollection/daemonset.go index 0ea7df5a9d..1d93b5d9a6 100644 --- a/autoscaler/controllers/datacollection/daemonset.go +++ b/autoscaler/controllers/datacollection/daemonset.go @@ -10,6 +10,8 @@ import ( "github.com/odigos-io/odigos/autoscaler/controllers/common" "github.com/odigos-io/odigos/autoscaler/controllers/datacollection/custom" "github.com/odigos-io/odigos/autoscaler/utils" + "k8s.io/apimachinery/pkg/util/version" + "github.com/odigos-io/odigos/k8sutils/pkg/consts" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -44,7 +46,8 @@ type DelayManager struct { } // RunSyncDaemonSetWithDelayAndSkipNewCalls runs the function with the specified 
delay and skips new calls until the function execution is finished -func (dm *DelayManager) RunSyncDaemonSetWithDelayAndSkipNewCalls(delay time.Duration, retries int, dests *odigosv1.DestinationList, collection *odigosv1.CollectorsGroup, ctx context.Context, c client.Client, scheme *runtime.Scheme, secrets []string, version string) { +func (dm *DelayManager) RunSyncDaemonSetWithDelayAndSkipNewCalls(delay time.Duration, retries int, dests *odigosv1.DestinationList, + collection *odigosv1.CollectorsGroup, ctx context.Context, c client.Client, scheme *runtime.Scheme, secrets []string, version string, k8sVersion *version.Version) { dm.mu.Lock() defer dm.mu.Unlock() @@ -73,7 +76,7 @@ func (dm *DelayManager) RunSyncDaemonSetWithDelayAndSkipNewCalls(delay time.Dura }() for i := 0; i < retries; i++ { - _, err = syncDaemonSet(ctx, dests, collection, c, scheme, secrets, version) + _, err = syncDaemonSet(ctx, dests, collection, c, scheme, secrets, version, k8sVersion) if err == nil { return } @@ -88,7 +91,7 @@ func (dm *DelayManager) finishProgress() { } func syncDaemonSet(ctx context.Context, dests *odigosv1.DestinationList, datacollection *odigosv1.CollectorsGroup, - c client.Client, scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string) (*appsv1.DaemonSet, error) { + c client.Client, scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, k8sVersion *version.Version) (*appsv1.DaemonSet, error) { logger := log.FromContext(ctx) odigletDaemonsetPodSpec, err := getOdigletDaemonsetPodSpec(ctx, c, datacollection.Namespace) @@ -109,7 +112,7 @@ func syncDaemonSet(ctx context.Context, dests *odigosv1.DestinationList, datacol logger.Error(err, "Failed to get signals from otelcol config") return nil, err } - desiredDs, err := getDesiredDaemonSet(datacollection, otelcolConfigContent, scheme, imagePullSecrets, odigosVersion, odigletDaemonsetPodSpec) + desiredDs, err := getDesiredDaemonSet(datacollection, otelcolConfigContent, scheme, imagePullSecrets, odigosVersion, k8sVersion, odigletDaemonsetPodSpec) if err != nil { logger.Error(err, "Failed to get desired DaemonSet") return nil, err @@ -166,7 +169,7 @@ func getOdigletDaemonsetPodSpec(ctx context.Context, c client.Client, namespace } func getDesiredDaemonSet(datacollection *odigosv1.CollectorsGroup, configData string, - scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, + scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, k8sVersion *version.Version, odigletDaemonsetPodSpec *corev1.PodSpec, ) (*appsv1.DaemonSet, error) { // TODO(edenfed): add log volumes only if needed according to apps or dests @@ -179,7 +182,14 @@ func getDesiredDaemonSet(datacollection *odigosv1.CollectorsGroup, configData st maxUnavailable := intstr.FromString("50%") // maxSurge is the number of pods that can be created above the desired number of pods. // we do not want more then 1 datacollection pod on the same node as they need to bind to oltp ports. 
- maxSurge := intstr.FromInt(0) + rollingUpdate := &appsv1.RollingUpdateDaemonSet{ + MaxUnavailable: &maxUnavailable, + } + // maxSurge was added to the Kubernetes API as alpha in v1.21; to be safe we gate the check on 1.22, and the fallback is to omit it + if k8sVersion != nil && k8sVersion.AtLeast(version.MustParse("1.22.0")) { + maxSurge := intstr.FromInt(0) + rollingUpdate.MaxSurge = &maxSurge + } desiredDs := &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ @@ -192,11 +202,8 @@ func getDesiredDaemonSet(datacollection *odigosv1.CollectorsGroup, configData st MatchLabels: NodeCollectorsLabels, }, UpdateStrategy: appsv1.DaemonSetUpdateStrategy{ - Type: appsv1.RollingUpdateDaemonSetStrategyType, - RollingUpdate: &appsv1.RollingUpdateDaemonSet{ - MaxUnavailable: &maxUnavailable, - MaxSurge: &maxSurge, - }, + Type: appsv1.RollingUpdateDaemonSetStrategyType, + RollingUpdate: rollingUpdate, }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ diff --git a/autoscaler/controllers/datacollection/root.go b/autoscaler/controllers/datacollection/root.go index 0ad92b35e2..80a6f11def 100644 --- a/autoscaler/controllers/datacollection/root.go +++ b/autoscaler/controllers/datacollection/root.go @@ -8,6 +8,7 @@ import ( "github.com/odigos-io/odigos/k8sutils/pkg/consts" "github.com/odigos-io/odigos/k8sutils/pkg/env" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/version" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -18,7 +19,7 @@ const ( syncDaemonsetRetry = 3 ) -func Sync(ctx context.Context, c client.Client, scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, disableNameProcessor bool) error { +func Sync(ctx context.Context, c client.Client, scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, k8sVersion *version.Version, disableNameProcessor bool) error { logger := log.FromContext(ctx) var instApps odigosv1.InstrumentedApplicationList @@ -51,12 +52,12 @@ func Sync(ctx context.Context, c client.Client, scheme *runtime.Scheme, imagePul return err } - return syncDataCollection(&instApps, &dests, &processors, &dataCollectionCollectorGroup, ctx, c, scheme, imagePullSecrets, odigosVersion, disableNameProcessor) + return syncDataCollection(&instApps, &dests, &processors, &dataCollectionCollectorGroup, ctx, c, scheme, imagePullSecrets, odigosVersion, k8sVersion, disableNameProcessor) } func syncDataCollection(instApps *odigosv1.InstrumentedApplicationList, dests *odigosv1.DestinationList, processors *odigosv1.ProcessorList, dataCollection *odigosv1.CollectorsGroup, ctx context.Context, c client.Client, - scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, disableNameProcessor bool) error { + scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, k8sVersion *version.Version, disableNameProcessor bool) error { logger := log.FromContext(ctx) logger.V(0).Info("Syncing data collection") @@ -66,7 +67,7 @@ func syncDataCollection(instApps *odigosv1.InstrumentedApplicationList, dests *o return err } - dm.RunSyncDaemonSetWithDelayAndSkipNewCalls(time.Duration(env.GetSyncDaemonSetDelay())*time.Second, syncDaemonsetRetry, dests, dataCollection, ctx, c, scheme, imagePullSecrets, odigosVersion) + dm.RunSyncDaemonSetWithDelayAndSkipNewCalls(time.Duration(env.GetSyncDaemonSetDelay())*time.Second, syncDaemonsetRetry, dests, dataCollection, ctx, c, scheme, imagePullSecrets, odigosVersion, k8sVersion) return nil } diff --git
a/autoscaler/controllers/instrumentedapplication_controller.go b/autoscaler/controllers/instrumentedapplication_controller.go index c600fdcef6..3061989973 100644 --- a/autoscaler/controllers/instrumentedapplication_controller.go +++ b/autoscaler/controllers/instrumentedapplication_controller.go @@ -23,6 +23,7 @@ import ( "github.com/odigos-io/odigos/autoscaler/controllers/datacollection" predicate "github.com/odigos-io/odigos/k8sutils/pkg/predicate" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/version" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -33,13 +34,14 @@ type InstrumentedApplicationReconciler struct { Scheme *runtime.Scheme ImagePullSecrets []string OdigosVersion string + K8sVersion *version.Version DisableNameProcessor bool } func (r *InstrumentedApplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { logger := log.FromContext(ctx) logger.V(0).Info("Reconciling InstrumentedApps") - err := datacollection.Sync(ctx, r.Client, r.Scheme, r.ImagePullSecrets, r.OdigosVersion, r.DisableNameProcessor) + err := datacollection.Sync(ctx, r.Client, r.Scheme, r.ImagePullSecrets, r.OdigosVersion, r.K8sVersion, r.DisableNameProcessor) if err != nil { return ctrl.Result{}, err } diff --git a/autoscaler/controllers/processor_controller.go b/autoscaler/controllers/processor_controller.go index 8814a2d019..3e0ea85844 100644 --- a/autoscaler/controllers/processor_controller.go +++ b/autoscaler/controllers/processor_controller.go @@ -8,6 +8,7 @@ import ( "github.com/odigos-io/odigos/autoscaler/controllers/datacollection" "github.com/odigos-io/odigos/autoscaler/controllers/gateway" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/version" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -19,6 +20,7 @@ type ProcessorReconciler struct { Scheme *runtime.Scheme ImagePullSecrets []string OdigosVersion string + K8sVersion *version.Version DisableNameProcessor bool Config *controllerconfig.ControllerConfig } @@ -33,7 +35,7 @@ func (r *ProcessorReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, err } - err = datacollection.Sync(ctx, r.Client, r.Scheme, r.ImagePullSecrets, r.OdigosVersion, r.DisableNameProcessor) + err = datacollection.Sync(ctx, r.Client, r.Scheme, r.ImagePullSecrets, r.OdigosVersion, r.K8sVersion, r.DisableNameProcessor) if err != nil { return ctrl.Result{}, err } diff --git a/autoscaler/main.go b/autoscaler/main.go index 2d55a5dae6..e33ca39cc1 100644 --- a/autoscaler/main.go +++ b/autoscaler/main.go @@ -25,6 +25,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "github.com/odigos-io/odigos/k8sutils/pkg/env" + odigosver "github.com/odigos-io/odigos/k8sutils/pkg/version" corev1 "k8s.io/api/core/v1" @@ -45,6 +46,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/version" clientgoscheme "k8s.io/client-go/kubernetes/scheme" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" @@ -84,7 +86,6 @@ func main() { var probeAddr string var imagePullSecretsString string var imagePullSecrets []string - odigosVersion := os.Getenv("ODIGOS_VERSION") flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") flag.StringVar(&probeAddr, 
"health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") @@ -95,9 +96,15 @@ func main() { "The image pull secrets to use for the collectors created by autoscaler") flag.StringVar(&nameutils.ImagePrefix, "image-prefix", "", "The image prefix to use for the collectors created by autoscaler") + odigosVersion := os.Getenv("ODIGOS_VERSION") if odigosVersion == "" { flag.StringVar(&odigosVersion, "version", "", "for development purposes only") } + // Get k8s version + k8sVersion, err := odigosver.GetKubernetesVersion() + if err != nil { + setupLog.Error(err, "unable to get Kubernetes version, continuing with default oldest supported version") + } opts := ctrlzap.Options{ Development: true, @@ -126,7 +133,8 @@ func main() { nsSelector := client.InNamespace(odigosNs).AsSelector() clusterCollectorLabelSelector := labels.Set(gateway.ClusterCollectorGateway).AsSelector() - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + cfg := ctrl.GetConfigOrDie() + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ Scheme: scheme, Metrics: metricsserver.Options{ BindAddress: metricsAddr, @@ -162,10 +170,15 @@ func main() { os.Exit(1) } - err = MigrateCollectorsWorkloadToNewLabels(context.Background(), mgr.GetClient(), odigosNs) - if err != nil { - setupLog.Error(err, "unable to migrate collectors workload to new labels") - os.Exit(1) + // The labaling was for ver 1.0.91, migration is not releavant for old k8s versions which couln't run. + // This is the reason we skip it for versions < 1.23 (Also, versions < 1.23 require a non-caching client and API chane) + if k8sVersion != nil && k8sVersion.GreaterThan(version.MustParse("v1.23")) { + // Use the cached client for versions >= 1.23 + err = MigrateCollectorsWorkloadToNewLabels(context.Background(), mgr.GetClient(), odigosNs) + if err != nil { + setupLog.Error(err, "unable to migrate collectors workload to new labels") + os.Exit(1) + } } // The name processor is used to transform device ids injected with the virtual device, @@ -194,6 +207,7 @@ func main() { Scheme: mgr.GetScheme(), ImagePullSecrets: imagePullSecrets, OdigosVersion: odigosVersion, + K8sVersion: k8sVersion, DisableNameProcessor: disableNameProcessor, Config: config, }).SetupWithManager(mgr); err != nil { @@ -205,6 +219,7 @@ func main() { Scheme: mgr.GetScheme(), ImagePullSecrets: imagePullSecrets, OdigosVersion: odigosVersion, + K8sVersion: k8sVersion, DisableNameProcessor: disableNameProcessor, Config: config, }).SetupWithManager(mgr); err != nil { @@ -216,6 +231,7 @@ func main() { Scheme: mgr.GetScheme(), ImagePullSecrets: imagePullSecrets, OdigosVersion: odigosVersion, + K8sVersion: k8sVersion, DisableNameProcessor: disableNameProcessor, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "InstrumentedApplication") diff --git a/cli/cmd/install.go b/cli/cmd/install.go index b7f5f98928..447f20da6b 100644 --- a/cli/cmd/install.go +++ b/cli/cmd/install.go @@ -50,7 +50,7 @@ var ( var ( // minK8SVersionForInstallation is the minimum Kubernetes version required for Odigos installation // this value must be in sync with the one defined in the kubeVersion field in Chart.yaml - minK8SVersionForInstallation = version.MustParse("v1.23.0") + minK8SVersionForInstallation = version.MustParse("v1.20.15") ) type ResourceCreationFunc func(ctx context.Context, cmd *cobra.Command, client *kube.Client, ns string) error @@ -80,23 +80,22 @@ This command will install k8s components that will auto-instrument your applicat // Check if 
the cluster meets the minimum requirements kc := cmd.Flag("kubeconfig").Value.String() details, err := autodetect.DetectK8SClusterDetails(ctx, kc, client) - if err == nil { - autodetect.CurrentKubernetesVersion = autodetect.KubernetesVersion{ - Kind: details.Kind, - Version: details.K8SVersion, - } + if !errors.Is(err, autodetect.ErrCannotDetectClusterKind) { + autodetect.CurrentKubernetesVersion.Kind = details.Kind + fmt.Printf("Detected cluster: Kubernetes kind: %s\n", details.Kind) + } else { + fmt.Println("Unknown Kubernetes cluster detected, proceeding with installation") + } + + if !errors.Is(err, autodetect.ErrCannotDetectK8sVersion) { + autodetect.CurrentKubernetesVersion.Version = details.K8SVersion if details.K8SVersion.LessThan(minK8SVersionForInstallation) { fmt.Printf("\033[31mERROR\033[0m Odigos requires Kubernetes version %s or higher but found %s, aborting\n", minK8SVersionForInstallation.String(), details.K8SVersion.String()) os.Exit(1) } - fmt.Printf("Detected cluster: %s Kubernetes version: %s\n", details.Kind, details.K8SVersion.String()) + fmt.Printf("Detected cluster: Kubernetes version: %s\n", details.K8SVersion.String()) } else { - if errors.Is(err, autodetect.ErrCannotDetectClusterKind) { - fmt.Println("Unknown Kubernetes cluster detected, proceeding with installation") - } - if errors.Is(err, autodetect.ErrCannotDetectK8sVersion) { - fmt.Println("Unknown Kubernetes version detected, proceeding with installation") - } + fmt.Println("Unknown Kubernetes version detected, proceeding with installation") } var odigosProToken string diff --git a/cli/cmd/resources/odiglet.go b/cli/cmd/resources/odiglet.go index b5cad80b65..c1588121e2 100644 --- a/cli/cmd/resources/odiglet.go +++ b/cli/cmd/resources/odiglet.go @@ -19,6 +19,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + k8sversion "k8s.io/apimachinery/pkg/util/version" ) const ( @@ -446,7 +447,15 @@ func NewOdigletDaemonSet(ns string, version string, imagePrefix string, imageNam maxUnavailable := intstr.FromString("50%") // maxSurge is the number of pods that can be created above the desired number of pods. // we do not want more then 1 odiglet pod on the same node as it is not supported by the eBPF. 
- maxSurge := intstr.FromInt(0) + // Prepare the RollingUpdate strategy for the odiglet DaemonSet. + // maxSurge is only set when the Kubernetes version is >= 1.22, since older API servers do not support it + rollingUpdate := &appsv1.RollingUpdateDaemonSet{ + MaxUnavailable: &maxUnavailable, + } + if autodetect.CurrentKubernetesVersion.Version != nil && autodetect.CurrentKubernetesVersion.Version.AtLeast(k8sversion.MustParse("v1.22")) { + maxSurge := intstr.FromInt(0) + rollingUpdate.MaxSurge = &maxSurge + } return &appsv1.DaemonSet{ TypeMeta: metav1.TypeMeta{ @@ -467,11 +476,8 @@ func NewOdigletDaemonSet(ns string, version string, imagePrefix string, imageNam }, }, UpdateStrategy: appsv1.DaemonSetUpdateStrategy{ - Type: appsv1.RollingUpdateDaemonSetStrategyType, - RollingUpdate: &appsv1.RollingUpdateDaemonSet{ - MaxUnavailable: &maxUnavailable, - MaxSurge: &maxSurge, - }, + Type: appsv1.RollingUpdateDaemonSetStrategyType, + RollingUpdate: rollingUpdate, }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ diff --git a/cli/pkg/autodetect/kind.go b/cli/pkg/autodetect/kind.go index fb543f8bb4..73cbaadb64 100644 --- a/cli/pkg/autodetect/kind.go +++ b/cli/pkg/autodetect/kind.go @@ -5,9 +5,9 @@ import ( "errors" "fmt" - "k8s.io/apimachinery/pkg/util/version" "github.com/odigos-io/odigos/cli/pkg/kube" k8sutils "github.com/odigos-io/odigos/k8sutils/pkg/client" + "k8s.io/apimachinery/pkg/util/version" ) type Kind string @@ -77,5 +77,5 @@ func DetectK8SClusterDetails(ctx context.Context, kc string, client *kube.Client }, nil } - return ClusterDetails{}, ErrCannotDetectClusterKind + return ClusterDetails{K8SVersion: ver}, ErrCannotDetectClusterKind } diff --git a/cli/pkg/kube/client.go b/cli/pkg/kube/client.go index c36d5fef73..c1f9d77465 100644 --- a/cli/pkg/kube/client.go +++ b/cli/pkg/kube/client.go @@ -7,6 +7,7 @@ import ( "strconv" k8sutils "github.com/odigos-io/odigos/k8sutils/pkg/client" + odigosver "github.com/odigos-io/odigos/k8sutils/pkg/version" appsv1 "k8s.io/api/apps/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -14,6 +15,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/selection" k8stypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/version" "sigs.k8s.io/yaml" "github.com/odigos-io/odigos/api/generated/odigos/clientset/versioned/typed/odigos/v1alpha1" @@ -197,7 +199,31 @@ func (c *Client) DeleteOldOdigosSystemObjects(ctx context.Context, resourceAndNa labelSelector := k8slabels.NewSelector().Add(*systemObject).Add(*notLatestVersion).String() resource := resourceAndNamespace.Resource ns := resourceAndNamespace.Namespace - return c.Dynamic.Resource(resource).Namespace(ns).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{ - LabelSelector: labelSelector, - }) + k8sVersion, err := odigosver.GetKubernetesVersion() + if err != nil { + fmt.Printf("DeleteOldOdigosSystemObjects: failed to get k8s version, proceeding anyway
:%v", err) + } + // DeleteCollection is only available in k8s 1.23 and above, for older versions we need to list and delete each resource + if k8sVersion != nil && k8sVersion.GreaterThan(version.MustParse("1.23")) { + return c.Dynamic.Resource(resource).Namespace(ns).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{ + LabelSelector: labelSelector, + }) + } else { + listOptions := metav1.ListOptions{ + LabelSelector: labelSelector, + } + resourceList, err := c.Dynamic.Resource(resource).Namespace(ns).List(ctx, listOptions) + if err != nil { + return fmt.Errorf("failed to list resources: %w", err) + } + + // Delete each resource individually + for _, item := range resourceList.Items { + err = c.Dynamic.Resource(resource).Namespace(ns).Delete(ctx, item.GetName(), metav1.DeleteOptions{}) + if err != nil { + return fmt.Errorf("failed to delete resource %s: %w", item.GetName(), err) + } + } + } + return nil } diff --git a/helm/odigos/Chart.yaml b/helm/odigos/Chart.yaml index f7cb06b950..c6145aa537 100644 --- a/helm/odigos/Chart.yaml +++ b/helm/odigos/Chart.yaml @@ -8,4 +8,4 @@ version: "0.0.0" appVersion: "v0.0.0" icon: https://d2q89wckrml3k4.cloudfront.net/logo.png # minimum kubernetes version required, this value must be in sync with the CLI install value -kubeVersion: ">= 1.23.0" +kubeVersion: ">= 1.20.15" diff --git a/helm/odigos/templates/odiglet/daemonset.yaml b/helm/odigos/templates/odiglet/daemonset.yaml index cfd0fd5dda..26d506fd63 100644 --- a/helm/odigos/templates/odiglet/daemonset.yaml +++ b/helm/odigos/templates/odiglet/daemonset.yaml @@ -11,7 +11,9 @@ spec: app.kubernetes.io/name: odiglet updateStrategy: rollingUpdate: + {{- if semverCompare ">=1.22.0" .Capabilities.KubeVersion.Version }} maxSurge: 0 + {{- end }} maxUnavailable: 50% type: RollingUpdate template: diff --git a/k8sutils/pkg/version/version.go b/k8sutils/pkg/version/version.go new file mode 100644 index 0000000000..d2f907f668 --- /dev/null +++ b/k8sutils/pkg/version/version.go @@ -0,0 +1,33 @@ +package version + +import ( + "k8s.io/apimachinery/pkg/util/version" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" +) + +// GetKubernetesVersion returns the Kubernetes version of the cluster +// This util function is intended to be called once during the initialization. +// Do not call this from reconcile or hot path. +func GetKubernetesVersion() (*version.Version, error) { + // Create a Kubernetes REST config directly + cfg, err := rest.InClusterConfig() + if err != nil { + return nil, err + } + + // Create a discovery client using the config + discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) + if err != nil { + return nil, err + } + + // Retrieve the server version + serverVersion, err := discoveryClient.ServerVersion() + if err != nil { + return nil, err + } + + // Parse and return the version + return version.Parse(serverVersion.String()) +}