Support Kubernetes version 1.20.15 #1751

Merged: 11 commits, Nov 15, 2024
3 changes: 3 additions & 0 deletions .github/workflows/e2e.yaml
@@ -65,6 +65,7 @@ jobs:
fail-fast: false
matrix:
kube-version:
- "1.20.15"
- "1.23"
- "1.30"
test-scenario:
@@ -74,6 +75,8 @@
- "cli-upgrade"
- "workload-lifecycle"
include:
- kube-version: "1.20.15"
kind-image: "kindest/node:v1.20.15@sha256:a32bf55309294120616886b5338f95dd98a2f7231519c7dedcec32ba29699394"
- kube-version: "1.23"
kind-image: "kindest/node:v1.23.17@sha256:14d0a9a892b943866d7e6be119a06871291c517d279aedb816a4b4bc0ec0a5b3"
- kube-version: "1.30"
4 changes: 3 additions & 1 deletion autoscaler/controllers/collectorsgroup_controller.go
@@ -28,6 +28,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"

"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/version"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -37,6 +38,7 @@ type CollectorsGroupReconciler struct {
Scheme *runtime.Scheme
ImagePullSecrets []string
OdigosVersion string
K8sVersion *version.Version
DisableNameProcessor bool
Config *controllerconfig.ControllerConfig
}
@@ -50,7 +52,7 @@ func (r *CollectorsGroupReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, err
}

err = datacollection.Sync(ctx, r.Client, r.Scheme, r.ImagePullSecrets, r.OdigosVersion, r.DisableNameProcessor)
err = datacollection.Sync(ctx, r.Client, r.Scheme, r.ImagePullSecrets, r.OdigosVersion, r.K8sVersion, r.DisableNameProcessor)
if err != nil {
return ctrl.Result{}, err
}
29 changes: 18 additions & 11 deletions autoscaler/controllers/datacollection/daemonset.go
@@ -10,6 +10,8 @@ import (
"github.com/odigos-io/odigos/autoscaler/controllers/common"
"github.com/odigos-io/odigos/autoscaler/controllers/datacollection/custom"
"github.com/odigos-io/odigos/autoscaler/utils"
"k8s.io/apimachinery/pkg/util/version"

"github.com/odigos-io/odigos/k8sutils/pkg/consts"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -44,7 +46,8 @@ type DelayManager struct {
}

// RunSyncDaemonSetWithDelayAndSkipNewCalls runs the function with the specified delay and skips new calls until the function execution is finished
func (dm *DelayManager) RunSyncDaemonSetWithDelayAndSkipNewCalls(delay time.Duration, retries int, dests *odigosv1.DestinationList, collection *odigosv1.CollectorsGroup, ctx context.Context, c client.Client, scheme *runtime.Scheme, secrets []string, version string) {
func (dm *DelayManager) RunSyncDaemonSetWithDelayAndSkipNewCalls(delay time.Duration, retries int, dests *odigosv1.DestinationList,
collection *odigosv1.CollectorsGroup, ctx context.Context, c client.Client, scheme *runtime.Scheme, secrets []string, version string, k8sVersion *version.Version) {
dm.mu.Lock()
defer dm.mu.Unlock()

@@ -73,7 +76,7 @@ func (dm *DelayManager) RunSyncDaemonSetWithDelayAndSkipNewCalls(delay time.Dura
}()

for i := 0; i < retries; i++ {
_, err = syncDaemonSet(ctx, dests, collection, c, scheme, secrets, version)
_, err = syncDaemonSet(ctx, dests, collection, c, scheme, secrets, version, k8sVersion)
if err == nil {
return
}
@@ -88,7 +91,7 @@ func (dm *DelayManager) finishProgress() {
}

func syncDaemonSet(ctx context.Context, dests *odigosv1.DestinationList, datacollection *odigosv1.CollectorsGroup,
c client.Client, scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string) (*appsv1.DaemonSet, error) {
c client.Client, scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, k8sVersion *version.Version) (*appsv1.DaemonSet, error) {
logger := log.FromContext(ctx)

odigletDaemonsetPodSpec, err := getOdigletDaemonsetPodSpec(ctx, c, datacollection.Namespace)
@@ -109,7 +112,7 @@ func syncDaemonSet(ctx context.Context, dests *odigosv1.DestinationList, datacol
logger.Error(err, "Failed to get signals from otelcol config")
return nil, err
}
desiredDs, err := getDesiredDaemonSet(datacollection, otelcolConfigContent, scheme, imagePullSecrets, odigosVersion, odigletDaemonsetPodSpec)
desiredDs, err := getDesiredDaemonSet(datacollection, otelcolConfigContent, scheme, imagePullSecrets, odigosVersion, k8sVersion, odigletDaemonsetPodSpec)
if err != nil {
logger.Error(err, "Failed to get desired DaemonSet")
return nil, err
@@ -166,7 +169,7 @@ func getOdigletDaemonsetPodSpec(ctx context.Context, c client.Client, namespace
}

func getDesiredDaemonSet(datacollection *odigosv1.CollectorsGroup, configData string,
scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string,
scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, k8sVersion *version.Version,
odigletDaemonsetPodSpec *corev1.PodSpec,
) (*appsv1.DaemonSet, error) {
// TODO(edenfed): add log volumes only if needed according to apps or dests
@@ -179,7 +182,14 @@ func getDesiredDaemonSet(datacollection *odigosv1.CollectorsGroup, configData st
maxUnavailable := intstr.FromString("50%")
// maxSurge is the number of pods that can be created above the desired number of pods.
// we do not want more than 1 datacollection pod on the same node, as they need to bind to OTLP ports.
maxSurge := intstr.FromInt(0)
rollingUpdate := &appsv1.RollingUpdateDaemonSet{
MaxUnavailable: &maxUnavailable,
}
// maxSurge was added to the Kubernetes API in v1.21 (alpha); to be safe we check against 1.22, and fall back to omitting it on older versions.
if k8sVersion != nil && k8sVersion.AtLeast(version.MustParse("1.22.0")) {
maxSurge := intstr.FromInt(0)
rollingUpdate.MaxSurge = &maxSurge
}

desiredDs := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
@@ -192,11 +202,8 @@ func getDesiredDaemonSet(datacollection *odigosv1.CollectorsGroup, configData st
MatchLabels: NodeCollectorsLabels,
},
UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
Type: appsv1.RollingUpdateDaemonSetStrategyType,
RollingUpdate: &appsv1.RollingUpdateDaemonSet{
MaxUnavailable: &maxUnavailable,
MaxSurge: &maxSurge,
},
Type: appsv1.RollingUpdateDaemonSetStrategyType,
RollingUpdate: rollingUpdate,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
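The maxSurge gate above relies on the k8s.io/apimachinery version package. A minimal, self-contained sketch of that comparison (the sample cluster versions are hypothetical, chosen to bracket the 1.22 cutoff):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

func main() {
	// Hypothetical cluster versions for illustration only.
	for _, v := range []string{"1.20.15", "1.21.5", "1.22.0", "1.30.2"} {
		k8sVersion := version.MustParse(v)
		// DaemonSet maxSurge is only configured on clusters at or above 1.22.
		supportsMaxSurge := k8sVersion.AtLeast(version.MustParse("1.22.0"))
		fmt.Printf("cluster %-8s -> set maxSurge: %v\n", v, supportsMaxSurge)
	}
}
```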
9 changes: 5 additions & 4 deletions autoscaler/controllers/datacollection/root.go
@@ -8,6 +8,7 @@ import (
"github.com/odigos-io/odigos/k8sutils/pkg/consts"
"github.com/odigos-io/odigos/k8sutils/pkg/env"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/version"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
)
@@ -18,7 +19,7 @@ const (
syncDaemonsetRetry = 3
)

func Sync(ctx context.Context, c client.Client, scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, disableNameProcessor bool) error {
func Sync(ctx context.Context, c client.Client, scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, k8sVersion *version.Version, disableNameProcessor bool) error {
logger := log.FromContext(ctx)

var instApps odigosv1.InstrumentedApplicationList
@@ -51,12 +52,12 @@ func Sync(ctx context.Context, c client.Client, scheme *runtime.Scheme, imagePul
return err
}

return syncDataCollection(&instApps, &dests, &processors, &dataCollectionCollectorGroup, ctx, c, scheme, imagePullSecrets, odigosVersion, disableNameProcessor)
return syncDataCollection(&instApps, &dests, &processors, &dataCollectionCollectorGroup, ctx, c, scheme, imagePullSecrets, odigosVersion, k8sVersion, disableNameProcessor)
}

func syncDataCollection(instApps *odigosv1.InstrumentedApplicationList, dests *odigosv1.DestinationList, processors *odigosv1.ProcessorList,
dataCollection *odigosv1.CollectorsGroup, ctx context.Context, c client.Client,
scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, disableNameProcessor bool) error {
scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, k8sVersion *version.Version, disableNameProcessor bool) error {
logger := log.FromContext(ctx)
logger.V(0).Info("Syncing data collection")

@@ -66,7 +67,7 @@
return err
}

dm.RunSyncDaemonSetWithDelayAndSkipNewCalls(time.Duration(env.GetSyncDaemonSetDelay())*time.Second, syncDaemonsetRetry, dests, dataCollection, ctx, c, scheme, imagePullSecrets, odigosVersion)
dm.RunSyncDaemonSetWithDelayAndSkipNewCalls(time.Duration(env.GetSyncDaemonSetDelay())*time.Second, syncDaemonsetRetry, dests, dataCollection, ctx, c, scheme, imagePullSecrets, odigosVersion, k8sVersion)

return nil
}
4 changes: 3 additions & 1 deletion autoscaler/controllers/instrumentedapplication_controller.go
@@ -23,6 +23,7 @@ import (
"github.com/odigos-io/odigos/autoscaler/controllers/datacollection"
predicate "github.com/odigos-io/odigos/k8sutils/pkg/predicate"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/version"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
@@ -33,13 +34,14 @@ type InstrumentedApplicationReconciler struct {
Scheme *runtime.Scheme
ImagePullSecrets []string
OdigosVersion string
K8sVersion *version.Version
DisableNameProcessor bool
}

func (r *InstrumentedApplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := log.FromContext(ctx)
logger.V(0).Info("Reconciling InstrumentedApps")
err := datacollection.Sync(ctx, r.Client, r.Scheme, r.ImagePullSecrets, r.OdigosVersion, r.DisableNameProcessor)
err := datacollection.Sync(ctx, r.Client, r.Scheme, r.ImagePullSecrets, r.OdigosVersion, r.K8sVersion, r.DisableNameProcessor)
if err != nil {
return ctrl.Result{}, err
}
4 changes: 3 additions & 1 deletion autoscaler/controllers/processor_controller.go
@@ -8,6 +8,7 @@ import (
"github.com/odigos-io/odigos/autoscaler/controllers/datacollection"
"github.com/odigos-io/odigos/autoscaler/controllers/gateway"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/version"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
@@ -19,6 +20,7 @@ type ProcessorReconciler struct {
Scheme *runtime.Scheme
ImagePullSecrets []string
OdigosVersion string
K8sVersion *version.Version
DisableNameProcessor bool
Config *controllerconfig.ControllerConfig
}
@@ -33,7 +35,7 @@ func (r *ProcessorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return ctrl.Result{}, err
}

err = datacollection.Sync(ctx, r.Client, r.Scheme, r.ImagePullSecrets, r.OdigosVersion, r.DisableNameProcessor)
err = datacollection.Sync(ctx, r.Client, r.Scheme, r.ImagePullSecrets, r.OdigosVersion, r.K8sVersion, r.DisableNameProcessor)
if err != nil {
return ctrl.Result{}, err
}
28 changes: 22 additions & 6 deletions autoscaler/main.go
@@ -25,6 +25,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"

"github.com/odigos-io/odigos/k8sutils/pkg/env"
odigosver "github.com/odigos-io/odigos/k8sutils/pkg/version"

corev1 "k8s.io/api/core/v1"

@@ -45,6 +46,7 @@
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/version"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
@@ -84,7 +86,6 @@ func main() {
var probeAddr string
var imagePullSecretsString string
var imagePullSecrets []string
odigosVersion := os.Getenv("ODIGOS_VERSION")

flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
@@ -95,9 +96,15 @@
"The image pull secrets to use for the collectors created by autoscaler")
flag.StringVar(&nameutils.ImagePrefix, "image-prefix", "", "The image prefix to use for the collectors created by autoscaler")

odigosVersion := os.Getenv("ODIGOS_VERSION")
if odigosVersion == "" {
flag.StringVar(&odigosVersion, "version", "", "for development purposes only")
}
// Get k8s version
k8sVersion, err := odigosver.GetKubernetesVersion()
if err != nil {
setupLog.Error(err, "unable to get Kubernetes version, continuing with default oldest supported version")
}

opts := ctrlzap.Options{
Development: true,
@@ -126,7 +133,8 @@ func main() {
nsSelector := client.InNamespace(odigosNs).AsSelector()
clusterCollectorLabelSelector := labels.Set(gateway.ClusterCollectorGateway).AsSelector()

mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
cfg := ctrl.GetConfigOrDie()
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme,
Metrics: metricsserver.Options{
BindAddress: metricsAddr,
@@ -162,10 +170,15 @@ func main() {
os.Exit(1)
}

err = MigrateCollectorsWorkloadToNewLabels(context.Background(), mgr.GetClient(), odigosNs)
if err != nil {
setupLog.Error(err, "unable to migrate collectors workload to new labels")
os.Exit(1)
// The labeling was introduced in version 1.0.91; the migration is not relevant for old k8s versions, which couldn't have run it.
// This is why we skip it for versions < 1.23 (also, versions < 1.23 would require a non-caching client and an API change).
if k8sVersion != nil && k8sVersion.GreaterThan(version.MustParse("v1.23")) {
// Use the cached client for versions >= 1.23
err = MigrateCollectorsWorkloadToNewLabels(context.Background(), mgr.GetClient(), odigosNs)
if err != nil {
setupLog.Error(err, "unable to migrate collectors workload to new labels")
os.Exit(1)
}
}

// The name processor is used to transform device ids injected with the virtual device,
@@ -194,6 +207,7 @@ func main() {
Scheme: mgr.GetScheme(),
ImagePullSecrets: imagePullSecrets,
OdigosVersion: odigosVersion,
K8sVersion: k8sVersion,
DisableNameProcessor: disableNameProcessor,
Config: config,
}).SetupWithManager(mgr); err != nil {
@@ -205,6 +219,7 @@
Scheme: mgr.GetScheme(),
ImagePullSecrets: imagePullSecrets,
OdigosVersion: odigosVersion,
K8sVersion: k8sVersion,
DisableNameProcessor: disableNameProcessor,
Config: config,
}).SetupWithManager(mgr); err != nil {
@@ -216,6 +231,7 @@
Scheme: mgr.GetScheme(),
ImagePullSecrets: imagePullSecrets,
OdigosVersion: odigosVersion,
K8sVersion: k8sVersion,
DisableNameProcessor: disableNameProcessor,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "InstrumentedApplication")
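The odigosver.GetKubernetesVersion helper called in main.go lives in k8sutils and is not part of this diff. A hedged sketch of how such a helper is commonly written with client-go's discovery client (the body below is an assumption, not the actual Odigos implementation):

```go
package k8sversion

import (
	"k8s.io/apimachinery/pkg/util/version"
	"k8s.io/client-go/discovery"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

// GetKubernetesVersion queries the API server for its version and parses it
// into a *version.Version. Sketch only; the real k8sutils helper may differ.
func GetKubernetesVersion() (*version.Version, error) {
	cfg, err := config.GetConfig()
	if err != nil {
		return nil, err
	}
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		return nil, err
	}
	info, err := dc.ServerVersion()
	if err != nil {
		return nil, err
	}
	// info.GitVersion looks like "v1.20.15"; ParseGeneric tolerates the "v" prefix.
	return version.ParseGeneric(info.GitVersion)
}
```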
25 changes: 12 additions & 13 deletions cli/cmd/install.go
@@ -50,7 +50,7 @@ var (
var (
// minK8SVersionForInstallation is the minimum Kubernetes version required for Odigos installation
// this value must be in sync with the one defined in the kubeVersion field in Chart.yaml
minK8SVersionForInstallation = version.MustParse("v1.23.0")
minK8SVersionForInstallation = version.MustParse("v1.20.15")
)

type ResourceCreationFunc func(ctx context.Context, cmd *cobra.Command, client *kube.Client, ns string) error
@@ -80,23 +80,22 @@ This command will install k8s components that will auto-instrument your applicat
// Check if the cluster meets the minimum requirements
kc := cmd.Flag("kubeconfig").Value.String()
details, err := autodetect.DetectK8SClusterDetails(ctx, kc, client)
if err == nil {
autodetect.CurrentKubernetesVersion = autodetect.KubernetesVersion{
Kind: details.Kind,
Version: details.K8SVersion,
}
if !errors.Is(err, autodetect.ErrCannotDetectClusterKind) {
autodetect.CurrentKubernetesVersion.Kind = details.Kind
fmt.Printf("Detected cluster: Kubernetes kind: %s\n", details.Kind)
} else {
fmt.Println("Unknown Kubernetes cluster detected, proceeding with installation")
}

if !errors.Is(err, autodetect.ErrCannotDetectK8sVersion) {
autodetect.CurrentKubernetesVersion.Version = details.K8SVersion
if details.K8SVersion.LessThan(minK8SVersionForInstallation) {
fmt.Printf("\033[31mERROR\033[0m Odigos requires Kubernetes version %s or higher but found %s, aborting\n", minK8SVersionForInstallation.String(), details.K8SVersion.String())
os.Exit(1)
}
fmt.Printf("Detected cluster: %s Kubernetes version: %s\n", details.Kind, details.K8SVersion.String())
fmt.Printf("Detected cluster: Kubernetes version: %s\n", details.K8SVersion.String())
} else {
if errors.Is(err, autodetect.ErrCannotDetectClusterKind) {
fmt.Println("Unknown Kubernetes cluster detected, proceeding with installation")
}
if errors.Is(err, autodetect.ErrCannotDetectK8sVersion) {
fmt.Println("Unknown Kubernetes version detected, proceeding with installation")
}
fmt.Println("Unknown Kubernetes version detected, proceeding with installation")
}

var odigosProToken string
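With the minimum lowered to v1.20.15, the CLI's LessThan gate now admits 1.20.x clusters. A small sketch of how that check behaves, using hypothetical detected versions:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

func main() {
	minK8SVersionForInstallation := version.MustParse("v1.20.15")
	// Hypothetical detected cluster versions.
	for _, detected := range []string{"v1.19.16", "v1.20.15", "v1.23.4"} {
		k8sVersion := version.MustParse(detected)
		if k8sVersion.LessThan(minK8SVersionForInstallation) {
			fmt.Printf("%s: below the minimum %s, installation would abort\n", detected, minK8SVersionForInstallation)
			continue
		}
		fmt.Printf("%s: meets the minimum, installation proceeds\n", detected)
	}
}
```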
18 changes: 12 additions & 6 deletions cli/cmd/resources/odiglet.go
@@ -19,6 +19,7 @@ import (
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
k8sversion "k8s.io/apimachinery/pkg/util/version"
)

const (
@@ -446,7 +447,15 @@ func NewOdigletDaemonSet(ns string, version string, imagePrefix string, imageNam
maxUnavailable := intstr.FromString("50%")
// maxSurge is the number of pods that can be created above the desired number of pods.
// we do not want more than 1 odiglet pod on the same node, as that is not supported by eBPF.
maxSurge := intstr.FromInt(0)
// Prepare RollingUpdate; maxSurge is only set when the Kubernetes version is >= 1.22, since older API servers do not support it.
rollingUpdate := &appsv1.RollingUpdateDaemonSet{
MaxUnavailable: &maxUnavailable,
}
if autodetect.CurrentKubernetesVersion.Version != nil && autodetect.CurrentKubernetesVersion.Version.AtLeast(k8sversion.MustParse("v1.22")) {
maxSurge := intstr.FromInt(0)
rollingUpdate.MaxSurge = &maxSurge
}

return &appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
@@ -467,11 +476,8 @@ func NewOdigletDaemonSet(ns string, version string, imagePrefix string, imageNam
},
},
UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
Type: appsv1.RollingUpdateDaemonSetStrategyType,
RollingUpdate: &appsv1.RollingUpdateDaemonSet{
MaxUnavailable: &maxUnavailable,
MaxSurge: &maxSurge,
},
Type: appsv1.RollingUpdateDaemonSetStrategyType,
RollingUpdate: rollingUpdate,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{