Refactor the upgrade process to stop using config on upgrade
Signed-off-by: Marko Mudrinić <mudrinic.mare@gmail.com>
xmudrii committed Jun 21, 2024
1 parent bce2ed9 commit 4bff91e
Showing 10 changed files with 162 additions and 46 deletions.
2 changes: 1 addition & 1 deletion pkg/scripts/kubeadm.go
@@ -66,7 +66,7 @@ var (
     `)
 
     kubeadmUpgradeScriptTemplate = heredoc.Doc(`
-        echo yes | sudo {{ .KUBEADM_UPGRADE }}{{ if .LEADER }} --config={{ .WORK_DIR }}/cfg/master_{{ .NODE_ID }}.yaml{{ end }}
+        sudo {{ .KUBEADM_UPGRADE }}
         sudo find /etc/kubernetes/pki/ -name *.crt -exec chmod 600 {} \;
     `)

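With the `--config` flag and the `echo yes |` pipe gone, only the upgrade command itself is spliced into the script (the leader now passes `--yes` directly, as the golden files below show). A minimal sketch of how such a script template is rendered, assuming only a `KUBEADM_UPGRADE` field; the real helper in `pkg/scripts` also handles `WORK_DIR`, `NODE_ID`, and error wrapping, and the version below is a placeholder:

```go
package main

import (
    "os"
    "text/template"
)

// Trimmed-down stand-in for kubeadmUpgradeScriptTemplate: with the --config
// flag removed, only the upgrade command itself is spliced into the script.
const upgradeScript = `set -xeuo pipefail
export "PATH=$PATH:/sbin:/usr/local/bin:/opt/bin"
sudo {{ .KUBEADM_UPGRADE }}
sudo find /etc/kubernetes/pki/ -name *.crt -exec chmod 600 {} \;
`

func main() {
    tpl := template.Must(template.New("kubeadm-upgrade").Parse(upgradeScript))

    // Leaders get "kubeadm upgrade apply --yes <version>"; followers and
    // static workers get "kubeadm upgrade node". The version is a placeholder.
    if err := tpl.Execute(os.Stdout, map[string]string{
        "KUBEADM_UPGRADE": "kubeadm upgrade apply --yes v1.30.0",
    }); err != nil {
        panic(err)
    }
}
```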
2 changes: 1 addition & 1 deletion pkg/scripts/kubeadm_test.go
@@ -280,7 +280,7 @@ func TestKubeadmUpgrade(t *testing.T) {
             name: "leader",
             args: args{
                 workdir: "some",
-                kubeadmCmd: "kubeadm upgrade apply v1.1.1",
+                kubeadmCmd: "kubeadm upgrade apply --yes v1.1.1",
                 leader: true,
             },
         },
2 changes: 1 addition & 1 deletion pkg/scripts/testdata/TestKubeadmUpgrade-leader.golden
@@ -1,4 +1,4 @@
 set -xeuo pipefail
 export "PATH=$PATH:/sbin:/usr/local/bin:/opt/bin"
-echo yes | sudo kubeadm upgrade apply v1.1.1 --config=some/cfg/master_0.yaml
+sudo kubeadm upgrade apply --yes v1.1.1
 sudo find /etc/kubernetes/pki/ -name *.crt -exec chmod 600 {} \;
2 changes: 1 addition & 1 deletion pkg/scripts/testdata/TestKubeadmUpgrade-v1beta2.golden
@@ -1,4 +1,4 @@
 set -xeuo pipefail
 export "PATH=$PATH:/sbin:/usr/local/bin:/opt/bin"
-echo yes | sudo kubeadm upgrade node
+sudo kubeadm upgrade node
 sudo find /etc/kubernetes/pki/ -name *.crt -exec chmod 600 {} \;
71 changes: 65 additions & 6 deletions pkg/tasks/kubeadm_config.go
@@ -17,6 +17,7 @@ limitations under the License.
 package tasks
 
 import (
+    "context"
     "fmt"
 
     kubeoneapi "k8c.io/kubeone/pkg/apis/kubeone"
@@ -25,6 +26,11 @@ import (
     "k8c.io/kubeone/pkg/scripts"
     "k8c.io/kubeone/pkg/state"
     "k8c.io/kubeone/pkg/templates/kubeadm"
+
+    corev1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/client-go/util/retry"
+    "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 func determinePauseImage(s *state.State) error {
@@ -58,10 +64,7 @@ func generateKubeadm(s *state.State) error {
         return err
     }
 
-    kubeadmProvider, err := kubeadm.New(s.Cluster.Versions.Kubernetes)
-    if err != nil {
-        return err
-    }
+    kubeadmProvider := kubeadm.New(s.Cluster.Versions.Kubernetes)
 
     for idx := range s.Cluster.ControlPlane.Hosts {
         node := s.Cluster.ControlPlane.Hosts[idx]
@@ -70,7 +73,7 @@
             return err
         }
 
-        s.Configuration.AddFile(fmt.Sprintf("cfg/master_%d.yaml", node.ID), kubeadmConf)
+        s.Configuration.AddFile(fmt.Sprintf("cfg/master_%d.yaml", node.ID), kubeadmConf.FullConfiguration)
     }
 
     for idx := range s.Cluster.StaticWorkers.Hosts {
@@ -80,7 +83,7 @@
             return err
         }
 
-        s.Configuration.AddFile(fmt.Sprintf("cfg/worker_%d.yaml", node.ID), kubeadmConf)
+        s.Configuration.AddFile(fmt.Sprintf("cfg/worker_%d.yaml", node.ID), kubeadmConf.JoinConfiguration)
     }
 
     return s.RunTaskOnAllNodes(uploadKubeadmToNode, state.RunParallel)
@@ -89,3 +92,59 @@ func generateKubeadm(s *state.State) error {
 func uploadKubeadmToNode(s *state.State, _ *kubeoneapi.HostConfig, conn executor.Interface) error {
     return s.Configuration.UploadTo(conn, s.WorkDir)
 }
+
+func uploadKubeadmToConfigMaps(s *state.State) error {
+    s.Logger.Infof("Rewriting kubeadm ConfigMaps...")
+
+    leader, err := s.Cluster.Leader()
+    if err != nil {
+        return err
+    }
+
+    kubeadmProvider := kubeadm.New(s.Cluster.Versions.Kubernetes)
+
+    kubeadmConfig, err := kubeadmProvider.Config(s, leader)
+    if err != nil {
+        return err
+    }
+
+    updateErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+        return updateConfigMap(s, "kubeadm-config", metav1.NamespaceSystem, "ClusterConfiguration", kubeadmConfig.ClusterConfiguration)
+    })
+    if updateErr != nil {
+        return updateErr
+    }
+
+    updateErr = retry.RetryOnConflict(retry.DefaultRetry, func() error {
+        return updateConfigMap(s, "kubelet-config", metav1.NamespaceSystem, "kubelet", kubeadmConfig.KubeletConfiguration)
+    })
+    if updateErr != nil {
+        return updateErr
+    }
+
+    updateErr = retry.RetryOnConflict(retry.DefaultRetry, func() error {
+        return updateConfigMap(s, "kube-proxy", metav1.NamespaceSystem, "config.conf", kubeadmConfig.KubeProxyConfiguration)
+    })
+    if updateErr != nil {
+        return updateErr
+    }
+
+    return nil
+}
+
+func updateConfigMap(s *state.State, name, namespace, key, value string) error {
+    configMap := corev1.ConfigMap{}
+    objKey := client.ObjectKey{
+        Name:      name,
+        Namespace: namespace,
+    }
+
+    err := s.DynamicClient.Get(context.Background(), objKey, &configMap)
+    if err != nil {
+        return err
+    }
+
+    configMap.Data[key] = value
+
+    return s.DynamicClient.Update(s.Context, &configMap)
+}
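The RetryOnConflict wrapper used above is the standard optimistic-concurrency pattern for ConfigMap writes: re-read the object on every attempt so a conflicting write by another client only costs a retry. A self-contained sketch of the same pattern, assuming a controller-runtime client built from the local kubeconfig; the ConfigMap key and value below are placeholders, not KubeOne's rendered configuration:

```go
package main

import (
    "context"
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/util/retry"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/client/config"
)

// setConfigMapKey re-reads the ConfigMap on every attempt, so a conflicting
// write by another client only costs a retry instead of failing the upgrade.
func setConfigMapKey(ctx context.Context, c client.Client, name, key, value string) error {
    return retry.RetryOnConflict(retry.DefaultRetry, func() error {
        cm := corev1.ConfigMap{}
        objKey := client.ObjectKey{Name: name, Namespace: metav1.NamespaceSystem}
        if err := c.Get(ctx, objKey, &cm); err != nil {
            return err
        }
        if cm.Data == nil {
            cm.Data = map[string]string{}
        }
        cm.Data[key] = value

        return c.Update(ctx, &cm)
    })
}

func main() {
    restCfg, err := config.GetConfig() // kubeconfig from $KUBECONFIG or ~/.kube/config
    if err != nil {
        panic(err)
    }

    c, err := client.New(restCfg, client.Options{})
    if err != nil {
        panic(err)
    }

    // Placeholder payload; KubeOne writes the rendered ClusterConfiguration here.
    err = setConfigMapKey(context.Background(), c, "kubeadm-config", "ClusterConfiguration", "# rendered ClusterConfiguration YAML")
    if err != nil {
        panic(err)
    }

    fmt.Println("kubeadm-config updated")
}
```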
17 changes: 4 additions & 13 deletions pkg/tasks/kubeadm_upgrade.go
@@ -24,10 +24,7 @@
 )
 
 func upgradeLeaderControlPlane(s *state.State, nodeID int) error {
-    kadm, err := kubeadm.New(s.Cluster.Versions.Kubernetes)
-    if err != nil {
-        return err
-    }
+    kadm := kubeadm.New(s.Cluster.Versions.Kubernetes)
 
     cmd, err := scripts.KubeadmUpgrade(kadm.UpgradeLeaderCommand(), s.WorkDir, true, nodeID)
     if err != nil {
@@ -40,10 +37,7 @@
 }
 
 func upgradeFollowerControlPlane(s *state.State, nodeID int) error {
-    kadm, err := kubeadm.New(s.Cluster.Versions.Kubernetes)
-    if err != nil {
-        return err
-    }
+    kadm := kubeadm.New(s.Cluster.Versions.Kubernetes)
 
     cmd, err := scripts.KubeadmUpgrade(kadm.UpgradeFollowerCommand(), s.WorkDir, false, nodeID)
     if err != nil {
@@ -56,12 +50,9 @@
 }
 
 func upgradeStaticWorker(s *state.State) error {
-    kadm, err := kubeadm.New(s.Cluster.Versions.Kubernetes)
-    if err != nil {
-        return err
-    }
+    kadm := kubeadm.New(s.Cluster.Versions.Kubernetes)
 
-    _, _, err = s.Runner.Run(`sudo `+kadm.UpgradeStaticWorkerCommand(), nil)
+    _, _, err := s.Runner.Run(`sudo `+kadm.UpgradeStaticWorkerCommand(), nil)
 
     return fail.SSH(err, "running kubeadm upgrade on static worker")
 }
1 change: 1 addition & 0 deletions pkg/tasks/tasks.go
@@ -324,6 +324,7 @@ func WithUpgrade(t Tasks) Tasks {
         append(kubernetesConfigFiles()...). // this, in the upgrade process where config rails are handled
         append(Tasks{
             {Fn: kubeconfig.BuildKubernetesClientset, Operation: "building kubernetes clientset"},
+            {Fn: uploadKubeadmToConfigMaps, Operation: "updating kubeadm configmaps"},
             {Fn: runPreflightChecks, Operation: "checking preflight safetynet", Retries: 1},
             {Fn: upgradeLeader, Operation: "upgrading leader control plane"},
             {Fn: upgradeFollower, Operation: "upgrading follower control plane"},
16 changes: 12 additions & 4 deletions pkg/templates/kubeadm/kubeadm.go
@@ -25,16 +25,24 @@
     kubeadmUpgradeNodeCommand = "kubeadm upgrade node"
 )
 
+type Config struct {
+    FullConfiguration      string
+    ClusterConfiguration   string
+    JoinConfiguration      string
+    KubeletConfiguration   string
+    KubeProxyConfiguration string
+}
+
 // Kubedm interface abstract differences between different kubeadm versions
 type Kubedm interface {
-    Config(s *state.State, instance kubeoneapi.HostConfig) (string, error)
-    ConfigWorker(s *state.State, instance kubeoneapi.HostConfig) (string, error)
+    Config(s *state.State, instance kubeoneapi.HostConfig) (*Config, error)
+    ConfigWorker(s *state.State, instance kubeoneapi.HostConfig) (*Config, error)
     UpgradeLeaderCommand() string
     UpgradeFollowerCommand() string
     UpgradeStaticWorkerCommand() string
 }
 
 // New constructor
-func New(ver string) (Kubedm, error) {
-    return &kubeadmv1beta3{version: ver}, nil
+func New(ver string) Kubedm {
+    return &kubeadmv1beta3{version: ver}
 }
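To illustrate the new return shape: a toy Config value showing how the pieces relate. FullConfiguration is the multi-document manifest written to cfg/master_N.yaml, while the single documents are what the upgrade path pushes into the kubeadm, kubelet, and kube-proxy ConfigMaps. The YAML snippets are placeholders, not real rendered configs:

```go
package main

import (
    "fmt"
    "strings"
)

// Mirrors the shape of the new kubeadm.Config struct, for illustration only.
type Config struct {
    FullConfiguration      string
    ClusterConfiguration   string
    JoinConfiguration      string
    KubeletConfiguration   string
    KubeProxyConfiguration string
}

func main() {
    // Placeholder documents standing in for the rendered kubeadm configs.
    cluster := "kind: ClusterConfiguration\n"
    kubelet := "kind: KubeletConfiguration\n"
    kubeProxy := "kind: KubeProxyConfiguration\n"
    join := "kind: JoinConfiguration\n"

    cfg := Config{
        // Everything in one manifest for cfg/master_N.yaml...
        FullConfiguration: strings.Join([]string{cluster, kubelet, kubeProxy, join}, "---\n"),
        // ...and the single documents for the per-component ConfigMaps.
        ClusterConfiguration:   cluster,
        JoinConfiguration:      join,
        KubeletConfiguration:   kubelet,
        KubeProxyConfiguration: kubeProxy,
    }

    fmt.Print(cfg.FullConfiguration)
}
```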
54 changes: 47 additions & 7 deletions pkg/templates/kubeadm/kubeadmv1beta3.go
@@ -23,32 +23,72 @@
     "k8c.io/kubeone/pkg/state"
     "k8c.io/kubeone/pkg/templates"
     "k8c.io/kubeone/pkg/templates/kubeadm/v1beta3"
+
+    "k8s.io/apimachinery/pkg/runtime"
 )
 
 type kubeadmv1beta3 struct {
     version string
 }
 
-func (*kubeadmv1beta3) Config(s *state.State, instance kubeoneapi.HostConfig) (string, error) {
+func (*kubeadmv1beta3) Config(s *state.State, instance kubeoneapi.HostConfig) (*Config, error) {
     config, err := v1beta3.NewConfig(s, instance)
     if err != nil {
-        return "", err
+        return nil, err
     }
 
+    fullConfig, err := templates.KubernetesToYAML([]runtime.Object{
+        config.InitConfiguration,
+        config.JoinConfiguration,
+        config.ClusterConfiguration,
+        config.KubeletConfiguration,
+        config.KubeProxyConfiguration,
+    })
+    if err != nil {
+        return nil, fmt.Errorf("converting kubeadm configuration to yaml: %w", err)
+    }
+
+    clusterConfig, err := templates.KubernetesToYAML([]runtime.Object{config.ClusterConfiguration})
+    if err != nil {
+        return nil, fmt.Errorf("converting kubeadm ClusterConfiguration to yaml: %w", err)
+    }
+
+    kubeletConfig, err := templates.KubernetesToYAML([]runtime.Object{config.KubeletConfiguration})
+    if err != nil {
+        return nil, fmt.Errorf("converting kubeadm KubeletConfiguration to yaml: %w", err)
+    }
+
+    kubeProxyConfig, err := templates.KubernetesToYAML([]runtime.Object{config.KubeProxyConfiguration})
+    if err != nil {
+        return nil, fmt.Errorf("converting kubeadm KubeProxyConfiguration to yaml: %w", err)
+    }
+
-    return templates.KubernetesToYAML(config)
+    return &Config{
+        FullConfiguration:      fullConfig,
+        ClusterConfiguration:   clusterConfig,
+        KubeletConfiguration:   kubeletConfig,
+        KubeProxyConfiguration: kubeProxyConfig,
+    }, nil
 }
 
-func (*kubeadmv1beta3) ConfigWorker(s *state.State, instance kubeoneapi.HostConfig) (string, error) {
+func (*kubeadmv1beta3) ConfigWorker(s *state.State, instance kubeoneapi.HostConfig) (*Config, error) {
     config, err := v1beta3.NewConfigWorker(s, instance)
     if err != nil {
-        return "", err
+        return nil, err
     }
 
+    joinConfig, err := templates.KubernetesToYAML([]runtime.Object{config.JoinConfiguration})
+    if err != nil {
+        return nil, fmt.Errorf("converting kubeadm JoinConfiguration to yaml: %w", err)
+    }
+
-    return templates.KubernetesToYAML(config)
+    return &Config{
+        JoinConfiguration: joinConfig,
+    }, nil
 }
 
 func (k *kubeadmv1beta3) UpgradeLeaderCommand() string {
-    return fmt.Sprintf("kubeadm upgrade apply %s", k.version)
+    return fmt.Sprintf("kubeadm upgrade apply --yes %s", k.version)
 }
 
 func (*kubeadmv1beta3) UpgradeFollowerCommand() string {
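templates.KubernetesToYAML itself is not part of this diff; the sketch below assumes it roughly marshals each runtime.Object and joins the results into one multi-document manifest. The sigs.k8s.io/yaml usage and the ConfigMap stand-in are illustrative assumptions, not the actual KubeOne helper:

```go
package main

import (
    "fmt"
    "strings"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "sigs.k8s.io/yaml"
)

// kubernetesToYAML approximates what templates.KubernetesToYAML is assumed to
// do: marshal each object and concatenate the results with "---" separators.
func kubernetesToYAML(objs []runtime.Object) (string, error) {
    var b strings.Builder
    for _, obj := range objs {
        out, err := yaml.Marshal(obj)
        if err != nil {
            return "", err
        }
        b.WriteString("---\n")
        b.Write(out)
    }

    return b.String(), nil
}

func main() {
    // A ConfigMap stands in for the kubeadm configuration objects.
    cm := &corev1.ConfigMap{
        TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
        ObjectMeta: metav1.ObjectMeta{Name: "kubeadm-config", Namespace: metav1.NamespaceSystem},
        Data:       map[string]string{"ClusterConfiguration": "..."},
    }

    doc, err := kubernetesToYAML([]runtime.Object{cm})
    if err != nil {
        panic(err)
    }

    fmt.Println(doc)
}
```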
41 changes: 29 additions & 12 deletions pkg/templates/kubeadm/v1beta3/kubeadm.go
@@ -77,8 +77,17 @@ var (
     fixedEtcd129Constraint = semverutil.MustParseConstraint(">= 1.29")
 )
 
+type Config struct {
+    InitConfiguration    *kubeadmv1beta3.InitConfiguration
+    JoinConfiguration    *kubeadmv1beta3.JoinConfiguration
+    ClusterConfiguration *kubeadmv1beta3.ClusterConfiguration
+
+    KubeletConfiguration   runtime.Object
+    KubeProxyConfiguration runtime.Object
+}
+
 // NewConfig returns all required configs to init a cluster via a set of v1beta3 configs
-func NewConfig(s *state.State, host kubeoneapi.HostConfig) ([]runtime.Object, error) {
+func NewConfig(s *state.State, host kubeoneapi.HostConfig) (*Config, error) {
     cluster := s.Cluster
     kubeSemVer, err := semver.NewVersion(cluster.Versions.Kubernetes)
     if err != nil {
@@ -386,12 +395,18 @@ func NewConfig(s *state.State, host kubeoneapi.HostConfig) ([]runtime.Object, er
         return nil, err
     }
 
-    kubeproxyConfig, err := kubernetesconfigs.NewKubeProxyConfiguration(s.Cluster)
+    kubeProxyConfig, err := kubernetesconfigs.NewKubeProxyConfiguration(s.Cluster)
     if err != nil {
         return nil, err
     }
 
-    return []runtime.Object{initConfig, joinConfig, clusterConfig, kubeletConfig, kubeproxyConfig}, nil
+    return &Config{
+        InitConfiguration:      initConfig,
+        JoinConfiguration:      joinConfig,
+        ClusterConfiguration:   clusterConfig,
+        KubeletConfiguration:   kubeletConfig,
+        KubeProxyConfiguration: kubeProxyConfig,
+    }, nil
 }
 
 func addControllerManagerNetworkArgs(m map[string]string, clusterNetwork kubeoneapi.ClusterNetworkConfig) {
@@ -418,36 +433,36 @@
     }
 }
 
-func addControlPlaneComponentsAdditionalArgs(cluster *kubeoneapi.KubeOneCluster, clusterConfig *kubeadmv1beta3.ClusterConfiguration) {
+func addControlPlaneComponentsAdditionalArgs(cluster *kubeoneapi.KubeOneCluster, clusterCfg *kubeadmv1beta3.ClusterConfiguration) {
     if cluster.ControlPlaneComponents != nil {
         if cluster.ControlPlaneComponents.ControllerManager != nil {
             if cluster.ControlPlaneComponents.ControllerManager.Flags != nil {
                 for k, v := range cluster.ControlPlaneComponents.ControllerManager.Flags {
-                    clusterConfig.ControllerManager.ExtraArgs[k] = v
+                    clusterCfg.ControllerManager.ExtraArgs[k] = v
                 }
             }
             if cluster.ControlPlaneComponents.ControllerManager.FeatureGates != nil {
-                clusterConfig.ControllerManager.ExtraArgs["feature-gates"] = mergeFeatureGates(clusterConfig.ControllerManager.ExtraArgs["feature-gates"], cluster.ControlPlaneComponents.ControllerManager.FeatureGates)
+                clusterCfg.ControllerManager.ExtraArgs["feature-gates"] = mergeFeatureGates(clusterCfg.ControllerManager.ExtraArgs["feature-gates"], cluster.ControlPlaneComponents.ControllerManager.FeatureGates)
             }
         }
         if cluster.ControlPlaneComponents.Scheduler != nil {
             if cluster.ControlPlaneComponents.Scheduler.Flags != nil {
                 for k, v := range cluster.ControlPlaneComponents.Scheduler.Flags {
-                    clusterConfig.Scheduler.ExtraArgs[k] = v
+                    clusterCfg.Scheduler.ExtraArgs[k] = v
                 }
             }
             if cluster.ControlPlaneComponents.Scheduler.FeatureGates != nil {
-                clusterConfig.Scheduler.ExtraArgs["feature-gates"] = mergeFeatureGates(clusterConfig.Scheduler.ExtraArgs["feature-gates"], cluster.ControlPlaneComponents.Scheduler.FeatureGates)
+                clusterCfg.Scheduler.ExtraArgs["feature-gates"] = mergeFeatureGates(clusterCfg.Scheduler.ExtraArgs["feature-gates"], cluster.ControlPlaneComponents.Scheduler.FeatureGates)
             }
         }
        if cluster.ControlPlaneComponents.APIServer != nil {
             if cluster.ControlPlaneComponents.APIServer.Flags != nil {
                 for k, v := range cluster.ControlPlaneComponents.APIServer.Flags {
-                    clusterConfig.APIServer.ExtraArgs[k] = v
+                    clusterCfg.APIServer.ExtraArgs[k] = v
                 }
             }
             if cluster.ControlPlaneComponents.APIServer.FeatureGates != nil {
-                clusterConfig.APIServer.ExtraArgs["feature-gates"] = mergeFeatureGates(clusterConfig.APIServer.ExtraArgs["feature-gates"], cluster.ControlPlaneComponents.APIServer.FeatureGates)
+                clusterCfg.APIServer.ExtraArgs["feature-gates"] = mergeFeatureGates(clusterCfg.APIServer.ExtraArgs["feature-gates"], cluster.ControlPlaneComponents.APIServer.FeatureGates)
             }
         }
     }
@@ -469,7 +484,7 @@ func join(ipFamily kubeoneapi.IPFamily, ipv4Subnet, ipv6Subnet string) string {
 }
 
 // NewConfig returns all required configs to init a cluster via a set of v13 configs
-func NewConfigWorker(s *state.State, host kubeoneapi.HostConfig) ([]runtime.Object, error) {
+func NewConfigWorker(s *state.State, host kubeoneapi.HostConfig) (*Config, error) {
     cluster := s.Cluster
 
     nodeRegistration := newNodeRegistration(s, host)
@@ -519,7 +534,9 @@
 
     joinConfig.NodeRegistration = nodeRegistration
 
-    return []runtime.Object{joinConfig}, nil
+    return &Config{
+        JoinConfiguration: joinConfig,
+    }, nil
 }
 
 func newNodeIP(host kubeoneapi.HostConfig) string {
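mergeFeatureGates is called above but its implementation is not part of this diff. A hypothetical sketch of what such a helper could do, assuming it layers a per-component gate map over an existing comma-separated --feature-gates value; the signature and behaviour are guesses, not KubeOne's actual implementation:

```go
package main

import (
    "fmt"
    "sort"
    "strconv"
    "strings"
)

// mergeFeatureGates (hypothetical) merges the per-component FeatureGates map
// on top of an existing "--feature-gates" value such as "A=true,B=false".
func mergeFeatureGates(existing string, gates map[string]bool) string {
    merged := map[string]bool{}
    for _, pair := range strings.Split(existing, ",") {
        if k, v, ok := strings.Cut(pair, "="); ok {
            merged[k] = v == "true"
        }
    }
    for k, v := range gates {
        merged[k] = v
    }

    keys := make([]string, 0, len(merged))
    for k := range merged {
        keys = append(keys, k)
    }
    sort.Strings(keys)

    out := make([]string, 0, len(keys))
    for _, k := range keys {
        out = append(out, k+"="+strconv.FormatBool(merged[k]))
    }

    return strings.Join(out, ",")
}

func main() {
    // Placeholder gate names for illustration.
    fmt.Println(mergeFeatureGates("RotateKubeletServerCertificate=true", map[string]bool{"SeccompDefault": true}))
}
```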
