From 4bff91eb62fb443a25af304f08919dc54bbd76a8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marko=20Mudrini=C4=87?=
Date: Thu, 20 Jun 2024 18:29:57 +0200
Subject: [PATCH] Refactor the upgrade process to stop using config on upgrade
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Marko Mudrinić
---
 pkg/scripts/kubeadm.go                        |  2 +-
 pkg/scripts/kubeadm_test.go                   |  2 +-
 .../testdata/TestKubeadmUpgrade-leader.golden |  2 +-
 .../TestKubeadmUpgrade-v1beta2.golden         |  2 +-
 pkg/tasks/kubeadm_config.go                   | 71 +++++++++++++++++--
 pkg/tasks/kubeadm_upgrade.go                  | 17 ++---
 pkg/tasks/tasks.go                            |  1 +
 pkg/templates/kubeadm/kubeadm.go              | 16 +++--
 pkg/templates/kubeadm/kubeadmv1beta3.go       | 54 ++++++++++++--
 pkg/templates/kubeadm/v1beta3/kubeadm.go      | 41 +++++++----
 10 files changed, 162 insertions(+), 46 deletions(-)

diff --git a/pkg/scripts/kubeadm.go b/pkg/scripts/kubeadm.go
index 5133fee59..3c583c656 100644
--- a/pkg/scripts/kubeadm.go
+++ b/pkg/scripts/kubeadm.go
@@ -66,7 +66,7 @@ var (
 	`)
 
 	kubeadmUpgradeScriptTemplate = heredoc.Doc(`
-		echo yes | sudo {{ .KUBEADM_UPGRADE }}{{ if .LEADER }} --config={{ .WORK_DIR }}/cfg/master_{{ .NODE_ID }}.yaml{{ end }}
+		sudo {{ .KUBEADM_UPGRADE }}
 		sudo find /etc/kubernetes/pki/ -name *.crt -exec chmod 600 {} \;
 	`)
 
diff --git a/pkg/scripts/kubeadm_test.go b/pkg/scripts/kubeadm_test.go
index a6e4dbf55..c5b7b7539 100644
--- a/pkg/scripts/kubeadm_test.go
+++ b/pkg/scripts/kubeadm_test.go
@@ -280,7 +280,7 @@ func TestKubeadmUpgrade(t *testing.T) {
 			name: "leader",
 			args: args{
 				workdir:    "some",
-				kubeadmCmd: "kubeadm upgrade apply v1.1.1",
+				kubeadmCmd: "kubeadm upgrade apply --yes v1.1.1",
 				leader:     true,
 			},
 		},
diff --git a/pkg/scripts/testdata/TestKubeadmUpgrade-leader.golden b/pkg/scripts/testdata/TestKubeadmUpgrade-leader.golden
index 488d84a66..30cc4b8ad 100644
--- a/pkg/scripts/testdata/TestKubeadmUpgrade-leader.golden
+++ b/pkg/scripts/testdata/TestKubeadmUpgrade-leader.golden
@@ -1,4 +1,4 @@
 set -xeuo pipefail
 export "PATH=$PATH:/sbin:/usr/local/bin:/opt/bin"
-echo yes | sudo kubeadm upgrade apply v1.1.1 --config=some/cfg/master_0.yaml
+sudo kubeadm upgrade apply --yes v1.1.1
 sudo find /etc/kubernetes/pki/ -name *.crt -exec chmod 600 {} \;
diff --git a/pkg/scripts/testdata/TestKubeadmUpgrade-v1beta2.golden b/pkg/scripts/testdata/TestKubeadmUpgrade-v1beta2.golden
index 9f97cc3bf..94e8de219 100644
--- a/pkg/scripts/testdata/TestKubeadmUpgrade-v1beta2.golden
+++ b/pkg/scripts/testdata/TestKubeadmUpgrade-v1beta2.golden
@@ -1,4 +1,4 @@
 set -xeuo pipefail
 export "PATH=$PATH:/sbin:/usr/local/bin:/opt/bin"
-echo yes | sudo kubeadm upgrade node
+sudo kubeadm upgrade node
 sudo find /etc/kubernetes/pki/ -name *.crt -exec chmod 600 {} \;
diff --git a/pkg/tasks/kubeadm_config.go b/pkg/tasks/kubeadm_config.go
index f73071dda..df7653f34 100644
--- a/pkg/tasks/kubeadm_config.go
+++ b/pkg/tasks/kubeadm_config.go
@@ -17,6 +17,7 @@ limitations under the License.
 package tasks
 
 import (
+	"context"
 	"fmt"
 
 	kubeoneapi "k8c.io/kubeone/pkg/apis/kubeone"
@@ -25,6 +26,11 @@ import (
 	"k8c.io/kubeone/pkg/scripts"
 	"k8c.io/kubeone/pkg/state"
 	"k8c.io/kubeone/pkg/templates/kubeadm"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/util/retry"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 func determinePauseImage(s *state.State) error {
@@ -58,10 +64,7 @@ func generateKubeadm(s *state.State) error {
 		return err
 	}
 
-	kubeadmProvider, err := kubeadm.New(s.Cluster.Versions.Kubernetes)
-	if err != nil {
-		return err
-	}
+	kubeadmProvider := kubeadm.New(s.Cluster.Versions.Kubernetes)
 
 	for idx := range s.Cluster.ControlPlane.Hosts {
 		node := s.Cluster.ControlPlane.Hosts[idx]
@@ -70,7 +73,7 @@ func generateKubeadm(s *state.State) error {
 			return err
 		}
 
-		s.Configuration.AddFile(fmt.Sprintf("cfg/master_%d.yaml", node.ID), kubeadmConf)
+		s.Configuration.AddFile(fmt.Sprintf("cfg/master_%d.yaml", node.ID), kubeadmConf.FullConfiguration)
 	}
 
 	for idx := range s.Cluster.StaticWorkers.Hosts {
@@ -80,7 +83,7 @@ func generateKubeadm(s *state.State) error {
 			return err
 		}
 
-		s.Configuration.AddFile(fmt.Sprintf("cfg/worker_%d.yaml", node.ID), kubeadmConf)
+		s.Configuration.AddFile(fmt.Sprintf("cfg/worker_%d.yaml", node.ID), kubeadmConf.JoinConfiguration)
 	}
 
 	return s.RunTaskOnAllNodes(uploadKubeadmToNode, state.RunParallel)
@@ -89,3 +92,59 @@ func generateKubeadm(s *state.State) error {
 func uploadKubeadmToNode(s *state.State, _ *kubeoneapi.HostConfig, conn executor.Interface) error {
 	return s.Configuration.UploadTo(conn, s.WorkDir)
 }
+
+func uploadKubeadmToConfigMaps(s *state.State) error {
+	s.Logger.Infof("Rewriting kubeadm ConfigMaps...")
+
+	leader, err := s.Cluster.Leader()
+	if err != nil {
+		return err
+	}
+
+	kubeadmProvider := kubeadm.New(s.Cluster.Versions.Kubernetes)
+
+	kubeadmConfig, err := kubeadmProvider.Config(s, leader)
+	if err != nil {
+		return err
+	}
+
+	updateErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		return updateConfigMap(s, "kubeadm-config", metav1.NamespaceSystem, "ClusterConfiguration", kubeadmConfig.ClusterConfiguration)
+	})
+	if updateErr != nil {
+		return updateErr
+	}
+
+	updateErr = retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		return updateConfigMap(s, "kubelet-config", metav1.NamespaceSystem, "kubelet", kubeadmConfig.KubeletConfiguration)
+	})
+	if updateErr != nil {
+		return updateErr
+	}
+
+	updateErr = retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		return updateConfigMap(s, "kube-proxy", metav1.NamespaceSystem, "config.conf", kubeadmConfig.KubeProxyConfiguration)
+	})
+	if updateErr != nil {
+		return updateErr
+	}
+
+	return nil
+}
+
+func updateConfigMap(s *state.State, name, namespace, key, value string) error {
+	configMap := corev1.ConfigMap{}
+	objKey := client.ObjectKey{
+		Name:      name,
+		Namespace: namespace,
+	}
+
+	err := s.DynamicClient.Get(context.Background(), objKey, &configMap)
+	if err != nil {
+		return err
+	}
+
+	configMap.Data[key] = value
+
+	return s.DynamicClient.Update(s.Context, &configMap)
+}
diff --git a/pkg/tasks/kubeadm_upgrade.go b/pkg/tasks/kubeadm_upgrade.go
index 2cbacf8c9..3b8f00bfa 100644
--- a/pkg/tasks/kubeadm_upgrade.go
+++ b/pkg/tasks/kubeadm_upgrade.go
@@ -24,10 +24,7 @@ import (
 )
 
 func upgradeLeaderControlPlane(s *state.State, nodeID int) error {
-	kadm, err := kubeadm.New(s.Cluster.Versions.Kubernetes)
-	if err != nil {
-		return err
-	}
+	kadm := kubeadm.New(s.Cluster.Versions.Kubernetes)
 
 	cmd, err := scripts.KubeadmUpgrade(kadm.UpgradeLeaderCommand(), s.WorkDir, true, nodeID)
 	if err != nil {
@@ -40,10 +37,7 @@ func upgradeLeaderControlPlane(s *state.State, nodeID int) error {
 }
 
 func upgradeFollowerControlPlane(s *state.State, nodeID int) error {
-	kadm, err := kubeadm.New(s.Cluster.Versions.Kubernetes)
-	if err != nil {
-		return err
-	}
+	kadm := kubeadm.New(s.Cluster.Versions.Kubernetes)
 
 	cmd, err := scripts.KubeadmUpgrade(kadm.UpgradeFollowerCommand(), s.WorkDir, false, nodeID)
 	if err != nil {
@@ -56,12 +50,9 @@ func upgradeFollowerControlPlane(s *state.State, nodeID int) error {
 }
 
 func upgradeStaticWorker(s *state.State) error {
-	kadm, err := kubeadm.New(s.Cluster.Versions.Kubernetes)
-	if err != nil {
-		return err
-	}
+	kadm := kubeadm.New(s.Cluster.Versions.Kubernetes)
 
-	_, _, err = s.Runner.Run(`sudo `+kadm.UpgradeStaticWorkerCommand(), nil)
+	_, _, err := s.Runner.Run(`sudo `+kadm.UpgradeStaticWorkerCommand(), nil)
 
 	return fail.SSH(err, "running kubeadm upgrade on static worker")
 }
diff --git a/pkg/tasks/tasks.go b/pkg/tasks/tasks.go
index b39e1b7aa..6f372a456 100644
--- a/pkg/tasks/tasks.go
+++ b/pkg/tasks/tasks.go
@@ -324,6 +324,7 @@ func WithUpgrade(t Tasks) Tasks {
 		append(kubernetesConfigFiles()...). // this, in the upgrade process where config rails are handled
 		append(Tasks{
 			{Fn: kubeconfig.BuildKubernetesClientset, Operation: "building kubernetes clientset"},
+			{Fn: uploadKubeadmToConfigMaps, Operation: "updating kubeadm configmaps"},
 			{Fn: runPreflightChecks, Operation: "checking preflight safetynet", Retries: 1},
 			{Fn: upgradeLeader, Operation: "upgrading leader control plane"},
 			{Fn: upgradeFollower, Operation: "upgrading follower control plane"},
diff --git a/pkg/templates/kubeadm/kubeadm.go b/pkg/templates/kubeadm/kubeadm.go
index a6e952b08..28afa6da8 100644
--- a/pkg/templates/kubeadm/kubeadm.go
+++ b/pkg/templates/kubeadm/kubeadm.go
@@ -25,16 +25,24 @@ const (
 	kubeadmUpgradeNodeCommand = "kubeadm upgrade node"
 )
 
+type Config struct {
+	FullConfiguration      string
+	ClusterConfiguration   string
+	JoinConfiguration      string
+	KubeletConfiguration   string
+	KubeProxyConfiguration string
+}
+
 // Kubedm interface abstract differences between different kubeadm versions
 type Kubedm interface {
-	Config(s *state.State, instance kubeoneapi.HostConfig) (string, error)
-	ConfigWorker(s *state.State, instance kubeoneapi.HostConfig) (string, error)
+	Config(s *state.State, instance kubeoneapi.HostConfig) (*Config, error)
+	ConfigWorker(s *state.State, instance kubeoneapi.HostConfig) (*Config, error)
 	UpgradeLeaderCommand() string
 	UpgradeFollowerCommand() string
 	UpgradeStaticWorkerCommand() string
 }
 
 // New constructor
-func New(ver string) (Kubedm, error) {
-	return &kubeadmv1beta3{version: ver}, nil
+func New(ver string) Kubedm {
+	return &kubeadmv1beta3{version: ver}
 }
diff --git a/pkg/templates/kubeadm/kubeadmv1beta3.go b/pkg/templates/kubeadm/kubeadmv1beta3.go
index 7363cc394..29346f56a 100644
--- a/pkg/templates/kubeadm/kubeadmv1beta3.go
+++ b/pkg/templates/kubeadm/kubeadmv1beta3.go
@@ -23,32 +23,72 @@ import (
 	"k8c.io/kubeone/pkg/state"
 	"k8c.io/kubeone/pkg/templates"
 	"k8c.io/kubeone/pkg/templates/kubeadm/v1beta3"
+
+	"k8s.io/apimachinery/pkg/runtime"
 )
 
 type kubeadmv1beta3 struct {
 	version string
 }
 
-func (*kubeadmv1beta3) Config(s *state.State, instance kubeoneapi.HostConfig) (string, error) {
+func (*kubeadmv1beta3) Config(s *state.State, instance kubeoneapi.HostConfig) (*Config, error) {
 	config, err := v1beta3.NewConfig(s, instance)
 	if err != nil {
-		return "", err
+		return nil, err
+	}
+
+	fullConfig, err := templates.KubernetesToYAML([]runtime.Object{
+		config.InitConfiguration,
+		config.JoinConfiguration,
+		config.ClusterConfiguration,
+		config.KubeletConfiguration,
+		config.KubeProxyConfiguration,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("converting kubeadm configuration to yaml: %w", err)
+	}
+
+	clusterConfig, err := templates.KubernetesToYAML([]runtime.Object{config.ClusterConfiguration})
+	if err != nil {
+		return nil, fmt.Errorf("converting kubeadm ClusterConfiguration to yaml: %w", err)
+	}
+
+	kubeletConfig, err := templates.KubernetesToYAML([]runtime.Object{config.KubeletConfiguration})
+	if err != nil {
+		return nil, fmt.Errorf("converting kubeadm KubeletConfiguration to yaml: %w", err)
+	}
+
+	kubeProxyConfig, err := templates.KubernetesToYAML([]runtime.Object{config.KubeProxyConfiguration})
+	if err != nil {
+		return nil, fmt.Errorf("converting kubeadm KubeProxyConfiguration to yaml: %w", err)
 	}
 
-	return templates.KubernetesToYAML(config)
+	return &Config{
+		FullConfiguration:      fullConfig,
+		ClusterConfiguration:   clusterConfig,
+		KubeletConfiguration:   kubeletConfig,
+		KubeProxyConfiguration: kubeProxyConfig,
+	}, nil
 }
 
-func (*kubeadmv1beta3) ConfigWorker(s *state.State, instance kubeoneapi.HostConfig) (string, error) {
+func (*kubeadmv1beta3) ConfigWorker(s *state.State, instance kubeoneapi.HostConfig) (*Config, error) {
 	config, err := v1beta3.NewConfigWorker(s, instance)
 	if err != nil {
-		return "", err
+		return nil, err
+	}
+
+	joinConfig, err := templates.KubernetesToYAML([]runtime.Object{config.JoinConfiguration})
+	if err != nil {
+		return nil, fmt.Errorf("converting kubeadm JoinConfiguration to yaml: %w", err)
 	}
 
-	return templates.KubernetesToYAML(config)
+	return &Config{
+		JoinConfiguration: joinConfig,
+	}, nil
 }
 
 func (k *kubeadmv1beta3) UpgradeLeaderCommand() string {
-	return fmt.Sprintf("kubeadm upgrade apply %s", k.version)
+	return fmt.Sprintf("kubeadm upgrade apply --yes %s", k.version)
 }
 
 func (*kubeadmv1beta3) UpgradeFollowerCommand() string {
diff --git a/pkg/templates/kubeadm/v1beta3/kubeadm.go b/pkg/templates/kubeadm/v1beta3/kubeadm.go
index 2e3bd7950..ebeb8a69c 100644
--- a/pkg/templates/kubeadm/v1beta3/kubeadm.go
+++ b/pkg/templates/kubeadm/v1beta3/kubeadm.go
@@ -77,8 +77,17 @@ var (
 	fixedEtcd129Constraint = semverutil.MustParseConstraint(">= 1.29")
 )
 
+type Config struct {
+	InitConfiguration    *kubeadmv1beta3.InitConfiguration
+	JoinConfiguration    *kubeadmv1beta3.JoinConfiguration
+	ClusterConfiguration *kubeadmv1beta3.ClusterConfiguration
+
+	KubeletConfiguration   runtime.Object
+	KubeProxyConfiguration runtime.Object
+}
+
 // NewConfig returns all required configs to init a cluster via a set of v1beta3 configs
-func NewConfig(s *state.State, host kubeoneapi.HostConfig) ([]runtime.Object, error) {
+func NewConfig(s *state.State, host kubeoneapi.HostConfig) (*Config, error) {
 	cluster := s.Cluster
 	kubeSemVer, err := semver.NewVersion(cluster.Versions.Kubernetes)
 	if err != nil {
@@ -386,12 +395,18 @@ func NewConfig(s *state.State, host kubeoneapi.HostConfig) ([]runtime.Object, er
 		return nil, err
 	}
 
-	kubeproxyConfig, err := kubernetesconfigs.NewKubeProxyConfiguration(s.Cluster)
+	kubeProxyConfig, err := kubernetesconfigs.NewKubeProxyConfiguration(s.Cluster)
 	if err != nil {
 		return nil, err
 	}
 
-	return []runtime.Object{initConfig, joinConfig, clusterConfig, kubeletConfig, kubeproxyConfig}, nil
+	return &Config{
+		InitConfiguration:      initConfig,
+		JoinConfiguration:      joinConfig,
+		ClusterConfiguration:   clusterConfig,
+		KubeletConfiguration:   kubeletConfig,
+		KubeProxyConfiguration: kubeProxyConfig,
+	}, nil
 }
 
 func addControllerManagerNetworkArgs(m map[string]string, clusterNetwork kubeoneapi.ClusterNetworkConfig) {
@@ -418,36 +433,36 @@ func addControllerManagerNetworkArgs(m map[string]string, clusterNetwork kubeone
 	}
 }
 
-func addControlPlaneComponentsAdditionalArgs(cluster *kubeoneapi.KubeOneCluster, clusterConfig *kubeadmv1beta3.ClusterConfiguration) {
+func addControlPlaneComponentsAdditionalArgs(cluster *kubeoneapi.KubeOneCluster, clusterCfg *kubeadmv1beta3.ClusterConfiguration) {
 	if cluster.ControlPlaneComponents != nil {
 		if cluster.ControlPlaneComponents.ControllerManager != nil {
 			if cluster.ControlPlaneComponents.ControllerManager.Flags != nil {
 				for k, v := range cluster.ControlPlaneComponents.ControllerManager.Flags {
-					clusterConfig.ControllerManager.ExtraArgs[k] = v
+					clusterCfg.ControllerManager.ExtraArgs[k] = v
 				}
 			}
 			if cluster.ControlPlaneComponents.ControllerManager.FeatureGates != nil {
-				clusterConfig.ControllerManager.ExtraArgs["feature-gates"] = mergeFeatureGates(clusterConfig.ControllerManager.ExtraArgs["feature-gates"], cluster.ControlPlaneComponents.ControllerManager.FeatureGates)
+				clusterCfg.ControllerManager.ExtraArgs["feature-gates"] = mergeFeatureGates(clusterCfg.ControllerManager.ExtraArgs["feature-gates"], cluster.ControlPlaneComponents.ControllerManager.FeatureGates)
 			}
 		}
 		if cluster.ControlPlaneComponents.Scheduler != nil {
 			if cluster.ControlPlaneComponents.Scheduler.Flags != nil {
 				for k, v := range cluster.ControlPlaneComponents.Scheduler.Flags {
-					clusterConfig.Scheduler.ExtraArgs[k] = v
+					clusterCfg.Scheduler.ExtraArgs[k] = v
 				}
 			}
 			if cluster.ControlPlaneComponents.Scheduler.FeatureGates != nil {
-				clusterConfig.Scheduler.ExtraArgs["feature-gates"] = mergeFeatureGates(clusterConfig.Scheduler.ExtraArgs["feature-gates"], cluster.ControlPlaneComponents.Scheduler.FeatureGates)
+				clusterCfg.Scheduler.ExtraArgs["feature-gates"] = mergeFeatureGates(clusterCfg.Scheduler.ExtraArgs["feature-gates"], cluster.ControlPlaneComponents.Scheduler.FeatureGates)
 			}
 		}
 		if cluster.ControlPlaneComponents.APIServer != nil {
 			if cluster.ControlPlaneComponents.APIServer.Flags != nil {
 				for k, v := range cluster.ControlPlaneComponents.APIServer.Flags {
-					clusterConfig.APIServer.ExtraArgs[k] = v
+					clusterCfg.APIServer.ExtraArgs[k] = v
 				}
 			}
 			if cluster.ControlPlaneComponents.APIServer.FeatureGates != nil {
-				clusterConfig.APIServer.ExtraArgs["feature-gates"] = mergeFeatureGates(clusterConfig.APIServer.ExtraArgs["feature-gates"], cluster.ControlPlaneComponents.APIServer.FeatureGates)
+				clusterCfg.APIServer.ExtraArgs["feature-gates"] = mergeFeatureGates(clusterCfg.APIServer.ExtraArgs["feature-gates"], cluster.ControlPlaneComponents.APIServer.FeatureGates)
 			}
 		}
 	}
@@ -469,7 +484,7 @@ func join(ipFamily kubeoneapi.IPFamily, ipv4Subnet, ipv6Subnet string) string {
 }
 
 // NewConfig returns all required configs to init a cluster via a set of v13 configs
-func NewConfigWorker(s *state.State, host kubeoneapi.HostConfig) ([]runtime.Object, error) {
+func NewConfigWorker(s *state.State, host kubeoneapi.HostConfig) (*Config, error) {
 	cluster := s.Cluster
 
 	nodeRegistration := newNodeRegistration(s, host)
@@ -519,7 +534,9 @@ func NewConfigWorker(s *state.State, host kubeoneapi.HostConfig) ([]runtime.Obje
 
 	joinConfig.NodeRegistration = nodeRegistration
 
-	return []runtime.Object{joinConfig}, nil
+	return &Config{
+		JoinConfiguration: joinConfig,
+	}, nil
 }
 
 func newNodeIP(host kubeoneapi.HostConfig) string {
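
Note (illustrative, not part of the patch above): with the --config flag dropped from the upgrade scripts, kubeadm upgrade apply --yes and kubeadm upgrade node take their input from the kubeadm-config, kubelet-config, and kube-proxy ConfigMaps in kube-system, which is why the new uploadKubeadmToConfigMaps task rewrites those ConfigMaps before any node is upgraded. The sketch below is a minimal client-go check that the three keys kubeadm reads are populated before running the upgrade; the file name and the admin kubeconfig at ~/.kube/config are assumptions, and this is not KubeOne code.

// verify_kubeadm_configmaps.go — illustrative sketch only; file name, kubeconfig
// path, and the use of plain client-go here are assumptions, not part of this patch.
package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
)

func main() {
	// Assumes an admin kubeconfig at the default location.
	kubeconfig := filepath.Join(homedir.HomeDir(), ".kube", "config")

	restCfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		fmt.Fprintln(os.Stderr, "building rest config:", err)
		os.Exit(1)
	}

	clientset, err := kubernetes.NewForConfig(restCfg)
	if err != nil {
		fmt.Fprintln(os.Stderr, "building clientset:", err)
		os.Exit(1)
	}

	// ConfigMap name -> data key read by kubeadm during an upgrade.
	// These match the objects rewritten by uploadKubeadmToConfigMaps in the patch.
	targets := map[string]string{
		"kubeadm-config": "ClusterConfiguration",
		"kubelet-config": "kubelet",
		"kube-proxy":     "config.conf",
	}

	for name, key := range targets {
		cm, err := clientset.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.Background(), name, metav1.GetOptions{})
		if err != nil {
			fmt.Fprintf(os.Stderr, "getting ConfigMap %s: %v\n", name, err)
			os.Exit(1)
		}
		if len(cm.Data[key]) == 0 {
			fmt.Fprintf(os.Stderr, "ConfigMap %s is missing key %q\n", name, key)
			os.Exit(1)
		}
		fmt.Printf("ok: %s/%s (%d bytes)\n", name, key, len(cm.Data[key]))
	}
}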