diff --git a/internal/controller/controlplane/helper.go b/internal/controller/controlplane/helper.go
index cf399b878..deac5f34b 100644
--- a/internal/controller/controlplane/helper.go
+++ b/internal/controller/controlplane/helper.go
@@ -1,3 +1,19 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
 package controlplane
 
 import (
@@ -25,6 +41,10 @@ import (
 	cpv1beta1 "github.com/k0sproject/k0smotron/api/controlplane/v1beta1"
 )
 
+const (
+	etcdMemberConditionTypeJoined = "Joined"
+)
+
 func (c *K0sController) createMachine(ctx context.Context, name string, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane, infraRef corev1.ObjectReference, failureDomain *string) (*clusterv1.Machine, error) {
 	machine, err := c.generateMachine(ctx, name, cluster, kcp, infraRef, failureDomain)
 	if err != nil {
@@ -243,15 +263,49 @@ func matchesTemplateClonedFrom(infraMachines map[string]*unstructured.Unstructur
 		clonedFromGroupKind == kcp.Spec.MachineTemplate.InfrastructureRef.GroupVersionKind().GroupKind().String()
 }
 
+func (c *K0sController) checkMachineLeft(ctx context.Context, name string, clientset *kubernetes.Clientset) (bool, error) {
+	var etcdMember unstructured.Unstructured
+	err := clientset.RESTClient().
+		Get().
+		AbsPath("/apis/etcd.k0sproject.io/v1beta1/etcdmembers/" + name).
+		Do(ctx).
+		Into(&etcdMember)
+
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return true, nil
+		}
+		return false, fmt.Errorf("error getting etcd member: %w", err)
+	}
+
+	conditions, _, err := unstructured.NestedSlice(etcdMember.Object, "status", "conditions")
+	if err != nil {
+		return false, fmt.Errorf("error getting etcd member conditions: %w", err)
+	}
+
+	for _, condition := range conditions {
+		conditionMap, ok := condition.(map[string]interface{})
+		if !ok {
+			continue
+		}
+		if conditionMap["type"] == etcdMemberConditionTypeJoined && conditionMap["status"] == "False" {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
 func (c *K0sController) markChildControlNodeToLeave(ctx context.Context, name string, clientset *kubernetes.Clientset) error {
 	if clientset == nil {
 		return nil
 	}
 
+	logger := log.FromContext(ctx).WithValues("controlNode", name)
+
 	err := clientset.RESTClient().
 		Patch(types.MergePatchType).
 		AbsPath("/apis/etcd.k0sproject.io/v1beta1/etcdmembers/" + name).
-		Body([]byte(`{"spec":{"leave":true}}`)).
+		Body([]byte(`{"spec":{"leave":true}, "metadata": {"annotations": {"k0smotron.io/marked-to-leave-at": "` + time.Now().Format(time.RFC3339) + `"}}}`)).
 		Do(ctx).
 		Error()
 	if err != nil {
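For reference, the scan that `checkMachineLeft` performs over `status.conditions` can be exercised in isolation. Below is a minimal, self-contained sketch assuming the condition layout shown in the hunk above; `hasLeft` and the inline object are illustrative stand-ins, not code from this PR.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

const etcdMemberConditionTypeJoined = "Joined"

// hasLeft mirrors the decision in checkMachineLeft: a member counts as gone
// once its Joined condition reports status "False".
func hasLeft(etcdMember *unstructured.Unstructured) (bool, error) {
	conditions, found, err := unstructured.NestedSlice(etcdMember.Object, "status", "conditions")
	if err != nil {
		return false, fmt.Errorf("error getting etcd member conditions: %w", err)
	}
	if !found {
		// No conditions reported yet; treat the member as still joined.
		return false, nil
	}
	for _, condition := range conditions {
		conditionMap, ok := condition.(map[string]interface{})
		if !ok {
			continue
		}
		if conditionMap["type"] == etcdMemberConditionTypeJoined && conditionMap["status"] == "False" {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	// Hand-written stand-in for an EtcdMember object returned by the k0s API.
	member := &unstructured.Unstructured{Object: map[string]interface{}{
		"status": map[string]interface{}{
			"conditions": []interface{}{
				map[string]interface{}{"type": "Joined", "status": "False"},
			},
		},
	}}
	left, err := hasLeft(member)
	fmt.Println(left, err) // true <nil>
}
```

The NotFound branch in the real helper covers the other terminal state: once the EtcdMember object has been deleted outright, the machine has certainly left.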
diff --git a/internal/controller/controlplane/k0s_controlplane_controller.go b/internal/controller/controlplane/k0s_controlplane_controller.go
index 35e3ef32a..e425642a9 100644
--- a/internal/controller/controlplane/k0s_controlplane_controller.go
+++ b/internal/controller/controlplane/k0s_controlplane_controller.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"reflect"
 	"strings"
 	"time"
 
@@ -33,6 +34,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	"k8s.io/utils/ptr"
@@ -123,6 +125,7 @@ func (c *K0sController) Reconcile(ctx context.Context, req ctrl.Request) (res ct
 	// Always patch the object to update the status
 	defer func() {
 		log.Info("Updating status")
+		existingStatus := kcp.Status.DeepCopy()
 		// Separate var for status update errors to avoid shadowing err
 		derr := c.updateStatus(ctx, kcp, cluster)
 		if derr != nil {
@@ -130,6 +133,10 @@ func (c *K0sController) Reconcile(ctx context.Context, req ctrl.Request) (res ct
 			return
 		}
 
+		if errors.Is(err, ErrNotReady) || reflect.DeepEqual(existingStatus, kcp.Status) {
+			return
+		}
+
 		// // Patch the status with server-side apply
 		// kcp.ObjectMeta.ManagedFields = nil // Remove managed fields when doing server-side apply
 		// derr = c.Status().Patch(ctx, kcp, client.Apply, client.FieldOwner(fieldOwner))
@@ -171,7 +178,7 @@ func (c *K0sController) Reconcile(ctx context.Context, req ctrl.Request) (res ct
 		return ctrl.Result{}, err
 	}
 
-	_, err = c.reconcile(ctx, cluster, kcp)
+	err = c.reconcile(ctx, cluster, kcp)
 	if err != nil {
 		if errors.Is(err, ErrNotReady) {
 			return ctrl.Result{RequeueAfter: 10, Requeue: true}, nil
@@ -242,119 +249,105 @@ func (c *K0sController) reconcileKubeconfig(ctx context.Context, cluster *cluste
 	return nil
 }
 
-func (c *K0sController) reconcile(ctx context.Context, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) (int32, error) {
+func (c *K0sController) reconcile(ctx context.Context, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) error {
 	var err error
 	kcp.Spec.K0sConfigSpec.K0s, err = enrichK0sConfigWithClusterData(cluster, kcp.Spec.K0sConfigSpec.K0s)
 	if err != nil {
-		return kcp.Status.Replicas, err
+		return err
 	}
 
 	err = c.reconcileKubeconfig(ctx, cluster, kcp)
 	if err != nil {
-		return kcp.Status.Replicas, fmt.Errorf("error reconciling kubeconfig secret: %w", err)
+		return fmt.Errorf("error reconciling kubeconfig secret: %w", err)
 	}
 
-	replicasToReport, err := c.reconcileMachines(ctx, cluster, kcp)
+	err = c.reconcileMachines(ctx, cluster, kcp)
 	if err != nil {
-		return replicasToReport, err
+		return err
 	}
-	return replicasToReport, nil
+	return nil
 }
 
-func (c *K0sController) reconcileMachines(ctx context.Context, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) (int32, error) {
-
+func (c *K0sController) reconcileMachines(ctx context.Context, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) error {
 	logger := log.FromContext(ctx, "cluster", cluster.Name, "kcp", kcp.Name)
 
-	replicasToReport := kcp.Spec.Replicas
-
 	machines, err := collections.GetFilteredMachinesForCluster(ctx, c, cluster, collections.ControlPlaneMachines(cluster.Name), collections.ActiveMachines)
 	if err != nil {
-		return replicasToReport, fmt.Errorf("error collecting machines: %w", err)
+		return fmt.Errorf("error collecting machines: %w", err)
 	}
 
 	if machines == nil {
-		return replicasToReport, fmt.Errorf("machines collection is nil")
+		return fmt.Errorf("machines collection is nil")
 	}
 
-	logger.Info("Collected machines", "count", machines.Len())
-
-	currentReplicas := machines.Len()
-	desiredReplicas := kcp.Spec.Replicas
-	machinesToDelete := 0
-	if currentReplicas > int(desiredReplicas) {
-		machinesToDelete = currentReplicas - int(desiredReplicas)
-		replicasToReport = kcp.Status.Replicas
+	infraMachines, err := c.getInfraMachines(ctx, machines)
+	if err != nil {
+		return fmt.Errorf("error getting infra machines: %w", err)
 	}
 
 	currentVersion, err := minVersion(machines)
 	if err != nil {
-		return replicasToReport, fmt.Errorf("error getting current cluster version from machines: %w", err)
+		return fmt.Errorf("error getting current cluster version from machines: %w", err)
 	}
 	log.Log.Info("Got current cluster version", "version", currentVersion)
 
+	machineNamesToDelete := make(map[string]bool)
+	desiredMachineNames := make(map[string]bool)
+
 	var clusterIsUpdating bool
-	var oldMachines int
-	for _, m := range machines {
+	for _, m := range machines.SortedByCreationTimestamp() {
 		if m.Spec.Version == nil || !versionMatches(m, kcp.Spec.Version) {
-			oldMachines++
+			clusterIsUpdating = true
+			machineNamesToDelete[m.Name] = true
+		} else if !matchesTemplateClonedFrom(infraMachines, kcp, m) {
+			machineNamesToDelete[m.Name] = true
+		} else if machines.Len() > int(kcp.Spec.Replicas)+len(machineNamesToDelete) {
+			machineNamesToDelete[m.Name] = true
+		} else {
+			desiredMachineNames[m.Name] = true
 		}
 	}
+	log.Log.Info("Collected machines", "count", machines.Len(), "desired", kcp.Spec.Replicas, "updating", clusterIsUpdating, "deleting", len(machineNamesToDelete), "desiredMachines", desiredMachineNames)
 
-	if oldMachines > 0 {
+	if clusterIsUpdating {
 		log.Log.Info("Cluster is updating", "currentVersion", currentVersion, "newVersion", kcp.Spec.Version, "strategy", kcp.Spec.UpdateStrategy)
-		clusterIsUpdating = true
 		if kcp.Spec.UpdateStrategy == cpv1beta1.UpdateRecreate {
-			// If the cluster is running in single mode, we can't use the Recreate strategy
 			if kcp.Spec.K0sConfigSpec.Args != nil {
 				for _, arg := range kcp.Spec.K0sConfigSpec.Args {
 					if arg == "--single" {
-						return replicasToReport, fmt.Errorf("UpdateRecreate strategy is not allowed when the cluster is running in single mode")
+						return fmt.Errorf("UpdateRecreate strategy is not allowed when the cluster is running in single mode")
 					}
 				}
 			}
-
-			desiredReplicas += kcp.Spec.Replicas
-			machinesToDelete = oldMachines
-			replicasToReport = desiredReplicas
-			log.Log.Info("Calculated new replicas", "desiredReplicas", desiredReplicas, "machinesToDelete", machinesToDelete, "replicasToReport", replicasToReport, "currentReplicas", currentReplicas)
 		} else {
 			kubeClient, err := c.getKubeClient(ctx, cluster)
 			if err != nil {
-				return replicasToReport, fmt.Errorf("error getting cluster client set for machine update: %w", err)
+				return fmt.Errorf("error getting cluster client set for machine update: %w", err)
 			}
 
 			err = c.createAutopilotPlan(ctx, kcp, cluster, kubeClient)
 			if err != nil {
-				return replicasToReport, fmt.Errorf("error creating autopilot plan: %w", err)
+				return fmt.Errorf("error creating autopilot plan: %w", err)
 			}
 		}
 	}
 
-	infraMachines, err := c.getInfraMachines(ctx, machines)
-	if err != nil {
-		return replicasToReport, fmt.Errorf("error getting infra machines: %w", err)
-	}
-
-	machineNames := make(map[string]bool)
-	for _, m := range machines {
-		machineNames[m.Name] = true
-		if !matchesTemplateClonedFrom(infraMachines, kcp, m) {
-			desiredReplicas++
-			machinesToDelete++
-		}
-	}
-
-	if len(machineNames) < int(desiredReplicas) {
-		for i := len(machineNames); i < int(desiredReplicas); i++ {
-			name := machineName(kcp.Name, i)
-			machineNames[name] = false
-			if len(machineNames) == int(desiredReplicas) {
-				break
-			}
+	i := 0
+	for len(desiredMachineNames) < int(kcp.Spec.Replicas) {
+		name := machineName(kcp.Name, i)
+		log.Log.Info("Desiring machine", "name", name)
+		_, ok := machineNamesToDelete[name]
+		if !ok {
+			_, exists := machines[name]
+			desiredMachineNames[name] = exists
 		}
+		i++
 	}
+	log.Log.Info("Desired machines", "count", len(desiredMachineNames))
 
-	for name, exists := range machineNames {
+	for name, exists := range desiredMachineNames {
 		if !exists || kcp.Spec.UpdateStrategy == cpv1beta1.UpdateInPlace {
 			// Wait for the previous machine to be created to avoid etcd issues if the cluster is updating
@@ -365,13 +358,13 @@ func (c *K0sController) reconcileMachines(ctx context.Context, cluster *clusterv
 			if clusterIsUpdating || (machines.Len() == 1 && kcp.Spec.Replicas > 1) {
 				err := c.checkMachineIsReady(ctx, machines.Newest().Name, cluster)
 				if err != nil {
-					return int32(machines.Len()), err
+					return err
 				}
 			}
 
 			machineFromTemplate, err := c.createMachineFromTemplate(ctx, name, cluster, kcp)
 			if err != nil {
-				return replicasToReport, fmt.Errorf("error creating machine from template: %w", err)
+				return fmt.Errorf("error creating machine from template: %w", err)
 			}
 
 			infraRef := corev1.ObjectReference{
@@ -384,75 +377,86 @@ func (c *K0sController) reconcileMachines(ctx context.Context, cluster *clusterv
 			selectedFailureDomain := failuredomains.PickFewest(cluster.Status.FailureDomains.FilterControlPlane(), machines)
 			machine, err := c.createMachine(ctx, name, cluster, kcp, infraRef, selectedFailureDomain)
 			if err != nil {
-				return replicasToReport, fmt.Errorf("error creating machine: %w", err)
+				return fmt.Errorf("error creating machine: %w", err)
 			}
 			machines[machine.Name] = machine
 		}
 
 		err = c.createBootstrapConfig(ctx, name, cluster, kcp, machines[name])
 		if err != nil {
-			return replicasToReport, fmt.Errorf("error creating bootstrap config: %w", err)
+			return fmt.Errorf("error creating bootstrap config: %w", err)
 		}
 	}
 
-	for _, m := range machines {
-		if m.Spec.Version != nil && *m.Spec.Version != kcp.Spec.Version {
-			logger.Info("Machine version is different from K0sControlPlane version", "machine", m.Name, "machineVersion", *m.Spec.Version, "kcpVersion", kcp.Spec.Version)
-			continue
-		}
+	if len(machineNamesToDelete) > 0 {
+		for m := range machines {
+			if machineNamesToDelete[m] {
+				continue
+			}
 
-		if machinesToDelete > 0 {
-			err := c.checkMachineIsReady(ctx, m.Name, cluster)
+			err := c.checkMachineIsReady(ctx, m, cluster)
 			if err != nil {
-				return int32(machines.Len()), err
+				logger.Error(err, "Error checking if machine is ready", "machine", m)
+				return err
 			}
 		}
 	}
 
-	if machinesToDelete > 0 {
-		logger.Info("Found machines to delete", "count", machinesToDelete)
+	if len(machineNamesToDelete) > 0 {
+		logger.Info("Found machines to delete", "count", len(machineNamesToDelete))
 
 		kubeClient, err := c.getKubeClient(ctx, cluster)
 		if err != nil {
-			return replicasToReport, fmt.Errorf("error getting cluster client set for deletion: %w", err)
+			return fmt.Errorf("error getting cluster client set for deletion: %w", err)
 		}
 
-		// Remove the last machine and report the new number of replicas to status
-		// On the next reconcile, the next machine will be removed
-		// Wait for the previous machine to be deleted to avoid etcd issues
-		machine := machines.Oldest()
+		// Remove the oldest machine and wait for it to be deleted, to avoid etcd issues
+		machine := machines.Filter(func(m *clusterv1.Machine) bool {
+			return machineNamesToDelete[m.Name]
+		}).Oldest()
 		logger.Info("Found oldest machine to delete", "machine", machine.Name)
 
 		if machine.Status.Phase == string(clusterv1.MachinePhaseDeleting) {
 			logger.Info("Machine is being deleted, waiting for it to be deleted", "machine", machine.Name)
-			return kcp.Status.Replicas, fmt.Errorf("waiting for previous machine to be deleted")
+			return fmt.Errorf("waiting for previous machine to be deleted")
 		}
 
-		replicasToReport--
 		name := machine.Name
-		if err := c.markChildControlNodeToLeave(ctx, name, kubeClient); err != nil {
-			return replicasToReport, fmt.Errorf("error marking controlnode to leave: %w", err)
+
+		waitCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
+		defer cancel()
+		err = wait.PollUntilContextCancel(waitCtx, 10*time.Second, true, func(fctx context.Context) (bool, error) {
+			if err := c.markChildControlNodeToLeave(fctx, name, kubeClient); err != nil {
+				return false, fmt.Errorf("error marking controlnode to leave: %w", err)
+			}
+
+			ok, err := c.checkMachineLeft(fctx, name, kubeClient)
+			if err != nil {
+				logger.Error(err, "Error checking machine left", "machine", name)
+			}
+			return ok, err
+		})
+		if err != nil {
+			return fmt.Errorf("error checking machine left: %w", err)
+		}
+
+		if err := c.deleteControlNode(ctx, name, kubeClient); err != nil {
+			return fmt.Errorf("error deleting controlnode: %w", err)
 		}
 
 		if err := c.deleteBootstrapConfig(ctx, name, kcp); err != nil {
-			return replicasToReport, fmt.Errorf("error deleting machine from template: %w", err)
+			return fmt.Errorf("error deleting bootstrap config: %w", err)
 		}
 
 		if err := c.deleteMachineFromTemplate(ctx, name, cluster, kcp); err != nil {
-			return replicasToReport, fmt.Errorf("error deleting machine from template: %w", err)
+			return fmt.Errorf("error deleting machine from template: %w", err)
 		}
 
 		if err := c.deleteMachine(ctx, name, kcp); err != nil {
-			return replicasToReport, fmt.Errorf("error deleting machine from template: %w", err)
+			return fmt.Errorf("error deleting machine: %w", err)
 		}
 
-		if err := c.deleteControlNode(ctx, name, kubeClient); err != nil {
-			return replicasToReport, fmt.Errorf("error deleting controlnode: %w", err)
-		}
-
-		logger.Info("Deleted machine", "machine", name, "replicasToReport", replicasToReport)
-		return replicasToReport, nil
+		logger.Info("Deleted machine", "machine", name)
 	}
-
-	return replicasToReport, nil
+	return nil
 }
 
 func (c *K0sController) createBootstrapConfig(ctx context.Context, name string, _ *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane, machine *clusterv1.Machine) error {
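The deletion path above replaces the old "decrement `replicasToReport` and finish the removal on a later reconcile" bookkeeping with a bounded mark-and-poll loop. A self-contained sketch of that shape, with hypothetical `markToLeave`/`hasLeft` closures standing in for `markChildControlNodeToLeave` and `checkMachineLeft`:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	// The real calls are an idempotent merge patch and an EtcdMember lookup;
	// here they are simulated so the sketch runs on its own.
	markToLeave := func(ctx context.Context) error { return nil }
	hasLeft := func(ctx context.Context) (bool, error) {
		attempts++
		return attempts >= 3, nil // pretend the member leaves on the third check
	}

	// Bound the whole wait, re-assert the leave intent on every tick, and only
	// proceed once the member is confirmed gone. Returning a non-nil error from
	// the condition func aborts the poll early, which is why the controller
	// logs lookup errors before propagating them.
	waitCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	err := wait.PollUntilContextCancel(waitCtx, 10*time.Millisecond, true, func(fctx context.Context) (bool, error) {
		if err := markToLeave(fctx); err != nil {
			return false, fmt.Errorf("error marking controlnode to leave: %w", err)
		}
		return hasLeft(fctx)
	})
	fmt.Printf("left after %d checks, err: %v\n", attempts, err)
}
```

Only after the poll succeeds does the controller delete the ControlNode, bootstrap config, infra machine, and Machine, so a member is never torn down while etcd still counts it as joined.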
diff --git a/internal/controller/controlplane/k0s_controlplane_controller_test.go b/internal/controller/controlplane/k0s_controlplane_controller_test.go
index 746f65617..c6ea41bc8 100644
--- a/internal/controller/controlplane/k0s_controlplane_controller_test.go
+++ b/internal/controller/controlplane/k0s_controlplane_controller_test.go
@@ -1,3 +1,19 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
 package controlplane
 
 import (
diff --git a/internal/controller/util/kubeclient.go b/internal/controller/util/kubeclient.go
index aeb4933d7..b198d5e5b 100644
--- a/internal/controller/util/kubeclient.go
+++ b/internal/controller/util/kubeclient.go
@@ -3,9 +3,13 @@ package util
 import (
 	"context"
 	"fmt"
+	"net"
+	"net/http"
+	"time"
 
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/client-go/transport"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	capiutil "sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/kubeconfig"
@@ -26,5 +30,29 @@ func GetKubeClient(ctx context.Context, client client.Client, cluster *clusterv1
 		return nil, fmt.Errorf("error generating %s restconfig: %w", cluster.Name, err)
 	}
 
-	return kubernetes.NewForConfig(restConfig)
+	tCfg, err := restConfig.TransportConfig()
+	if err != nil {
+		return nil, fmt.Errorf("error generating %s transport config: %w", cluster.Name, err)
+	}
+	tlsCfg, err := transport.TLSConfigFor(tCfg)
+	if err != nil {
+		return nil, fmt.Errorf("error generating %s tls config: %w", cluster.Name, err)
+	}
+
+	// Disable keep-alive on a dedicated client (never mutate the shared http.DefaultClient) to avoid hanging connections
+	cl := &http.Client{}
+	cl.Transport = &http.Transport{
+		DialContext: (&net.Dialer{
+			Timeout:   3 * time.Second,
+			KeepAlive: -1,
+		}).DialContext,
+		ForceAttemptHTTP2:     true,
+		MaxIdleConns:          10,
+		IdleConnTimeout:       5 * time.Second,
+		TLSHandshakeTimeout:   5 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+		TLSClientConfig:       tlsCfg,
+	}
+
+	return kubernetes.NewForConfigAndClient(restConfig, cl)
 }
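One pitfall worth flagging in `GetKubeClient`: `http.DefaultClient` is a package-level `*http.Client`, so assigning to its `Transport` would silently reconfigure every consumer of the default client in the process, and each workload cluster's TLS config would clobber the previous one. The hunk above therefore builds a dedicated client. A standalone sketch of the same construction (the `newShortLivedClient` name is illustrative):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
	"time"
)

// newShortLivedClient builds a per-cluster client with the same timeouts as
// the diff above. The TLS config differs per cluster, which is exactly why
// the client cannot be shared process-wide.
func newShortLivedClient(tlsCfg *tls.Config) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			DialContext: (&net.Dialer{
				Timeout:   3 * time.Second,
				KeepAlive: -1, // a negative value disables TCP keep-alive probes
			}).DialContext,
			ForceAttemptHTTP2:     true,
			MaxIdleConns:          10,
			IdleConnTimeout:       5 * time.Second,
			TLSHandshakeTimeout:   5 * time.Second,
			ExpectContinueTimeout: 1 * time.Second,
			TLSClientConfig:       tlsCfg,
		},
	}
}

func main() {
	a := newShortLivedClient(&tls.Config{MinVersion: tls.VersionTLS12})
	b := newShortLivedClient(&tls.Config{MinVersion: tls.VersionTLS13})
	// Distinct clients: configuring one never leaks into the other, nor into
	// http.DefaultClient.
	fmt.Println(a != b, a != http.DefaultClient) // true true
}
```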
diff --git a/inttest/capi-docker-machine-change-template/capi_docker_machine_change_template_test.go b/inttest/capi-docker-machine-change-template/capi_docker_machine_change_template_test.go
index b1765691c..7eb83ffa5 100644
--- a/inttest/capi-docker-machine-change-template/capi_docker_machine_change_template_test.go
+++ b/inttest/capi-docker-machine-change-template/capi_docker_machine_change_template_test.go
@@ -42,11 +42,12 @@ import (
 
 type CAPIDockerMachineChangeTemplate struct {
 	suite.Suite
-	client                 *kubernetes.Clientset
-	restConfig             *rest.Config
-	clusterYamlsPath       string
-	clusterYamlsUpdatePath string
-	ctx                    context.Context
+	client                       *kubernetes.Clientset
+	restConfig                   *rest.Config
+	clusterYamlsPath             string
+	clusterYamlsUpdatePath       string
+	clusterYamlsSecondUpdatePath string
+	ctx                          context.Context
 }
 
 func TestCAPIDockerMachineChangeTemplate(t *testing.T) {
@@ -74,6 +75,8 @@ func (s *CAPIDockerMachineChangeTemplate) SetupSuite() {
 	s.Require().NoError(os.WriteFile(s.clusterYamlsPath, []byte(dockerClusterYaml), 0644))
 	s.clusterYamlsUpdatePath = tmpDir + "/update.yaml"
 	s.Require().NoError(os.WriteFile(s.clusterYamlsUpdatePath, []byte(controlPlaneUpdate), 0644))
+	s.clusterYamlsSecondUpdatePath = tmpDir + "/update2.yaml"
+	s.Require().NoError(os.WriteFile(s.clusterYamlsSecondUpdatePath, []byte(controlPlaneSecondUpdate), 0644))
 
 	s.ctx, _ = util.NewSuiteContext(s.T())
 }
@@ -147,7 +150,6 @@ func (s *CAPIDockerMachineChangeTemplate) TestCAPIControlPlaneDockerDownScaling(
 	s.T().Log("updating cluster objects")
 	s.updateClusterObjects()
 
-	// nolint:staticcheck
 	err = wait.PollUntilContextCancel(s.ctx, 100*time.Millisecond, true, func(ctx context.Context) (bool, error) {
 		newNodeIDs, err := util.GetControlPlaneNodesIDs("docker-test-")
 		if err != nil {
@@ -176,6 +178,38 @@ func (s *CAPIDockerMachineChangeTemplate) TestCAPIControlPlaneDockerDownScaling(
 		Into(&obj)
 	s.Require().NoError(err)
 	s.Require().Equal("docker-test-cp-template-new", obj.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation])
+
+	time.Sleep(time.Minute)
+	s.T().Log("updating cluster objects again")
+	s.updateClusterObjectsAgain()
+
+	err = wait.PollUntilContextCancel(s.ctx, 100*time.Millisecond, true, func(ctx context.Context) (bool, error) {
+		newNodeIDs, err := util.GetControlPlaneNodesIDs("docker-test-")
+		if err != nil {
+			return false, nil
+		}
+
+		return len(newNodeIDs) == 6, nil
+	})
+	s.Require().NoError(err)
+
+	err = wait.PollUntilContextCancel(s.ctx, 1*time.Second, true, func(ctx context.Context) (bool, error) {
+		veryNewNodeIDs, err := util.GetControlPlaneNodesIDs("docker-test-")
+		if err != nil {
+			return false, nil
+		}
+
+		return len(veryNewNodeIDs) == 3, nil
+	})
+	s.Require().NoError(err)
+
+	err = s.client.RESTClient().
+		Get().
+		AbsPath("/apis/infrastructure.cluster.x-k8s.io/v1beta1/namespaces/default/dockermachines/docker-test-2").
+		Do(s.ctx).
+		Into(&obj)
+	s.Require().NoError(err)
+	s.Require().Equal("docker-test-cp-template-new-2", obj.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation])
 }
 
 func (s *CAPIDockerMachineChangeTemplate) applyClusterObjects() {
@@ -190,6 +224,12 @@ func (s *CAPIDockerMachineChangeTemplate) updateClusterObjects() {
 	s.Require().NoError(err, "failed to update cluster objects: %s", string(out))
 }
 
+func (s *CAPIDockerMachineChangeTemplate) updateClusterObjectsAgain() {
+	// Exec via kubectl
+	out, err := exec.Command("kubectl", "apply", "-f", s.clusterYamlsSecondUpdatePath).CombinedOutput()
+	s.Require().NoError(err, "failed to update cluster objects: %s", string(out))
+}
+
 func (s *CAPIDockerMachineChangeTemplate) deleteCluster() {
 	// Exec via kubectl
 	out, err := exec.Command("kubectl", "delete", "-f", s.clusterYamlsPath).CombinedOutput()
@@ -354,3 +394,43 @@ spec:
       name: docker-test-cp-template-new
       namespace: default
 `
+
+var controlPlaneSecondUpdate = `
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerMachineTemplate
+metadata:
+  name: docker-test-cp-template-new-2
+  namespace: default
+spec:
+  template:
+    spec:
+      customImage: kindest/node:v1.31.0
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: K0sControlPlane
+metadata:
+  name: docker-test
+spec:
+  replicas: 3
+  version: v1.31.2+k0s.0
+  updateStrategy: Recreate
+  k0sConfigSpec:
+    k0s:
+      apiVersion: k0s.k0sproject.io/v1beta1
+      kind: ClusterConfig
+      metadata:
+        name: k0s
+      spec:
+        api:
+          extraArgs:
+            anonymous-auth: "true"
+        telemetry:
+          enabled: false
+  machineTemplate:
+    infrastructureRef:
+      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+      kind: DockerMachineTemplate
+      name: docker-test-cp-template-new-2
+      namespace: default
+`
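A note on the node counts the updated test polls for: with `updateStrategy: Recreate` and `replicas: 3`, the controller marks all three existing machines for deletion (their template changed), brings up three replacements alongside them (hence the intermediate count of 6), and then removes the old members one at a time through the leave-and-poll flow until the cluster settles back at 3.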