diff --git a/cmd/create/cmd.go b/cmd/create/cmd.go index d3eac72bc4..4f7e65d420 100644 --- a/cmd/create/cmd.go +++ b/cmd/create/cmd.go @@ -63,7 +63,8 @@ func init() { Cmd.AddCommand(tuningconfigs.Cmd) Cmd.AddCommand(dnsdomains.Cmd) Cmd.AddCommand(autoscaler.Cmd) - Cmd.AddCommand(kubeletconfig.Cmd) + kubeletConfig := kubeletconfig.NewCreateKubeletConfigCommand() + Cmd.AddCommand(kubeletConfig) Cmd.AddCommand(externalauthprovider.Cmd) Cmd.AddCommand(breakglasscredential.Cmd) @@ -77,7 +78,7 @@ func init() { userrole.Cmd, ocmrole.Cmd, oidcprovider.Cmd, breakglasscredential.Cmd, admin.Cmd, autoscaler.Cmd, dnsdomains.Cmd, - externalauthprovider.Cmd, idp.Cmd, kubeletconfig.Cmd, tuningconfigs.Cmd, + externalauthprovider.Cmd, idp.Cmd, kubeletConfig, tuningconfigs.Cmd, } arguments.MarkRegionDeprecated(Cmd, globallyAvailableCommands) } diff --git a/cmd/create/kubeletconfig/cmd.go b/cmd/create/kubeletconfig/cmd.go index 15f7df35d4..051b47125b 100644 --- a/cmd/create/kubeletconfig/cmd.go +++ b/cmd/create/kubeletconfig/cmd.go @@ -17,8 +17,8 @@ limitations under the License. 
package kubeletconfig import ( + "context" "fmt" - "os" cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" "github.com/spf13/cobra" @@ -30,88 +30,92 @@ import ( "github.com/openshift/rosa/pkg/rosa" ) -var Cmd = &cobra.Command{ - Use: "kubeletconfig", - Aliases: []string{"kubelet-config"}, - Short: "Create a custom kubeletconfig for a cluster", - Long: "Create a custom kubeletconfig for a cluster", - Example: ` # Create a custom kubeletconfig with a pod-pids-limit of 5000 +const ( + use = "kubeletconfig" + short = "Create a custom kubeletconfig for a cluster" + long = short + example = ` # Create a custom kubeletconfig with a pod-pids-limit of 5000 rosa create kubeletconfig --cluster=mycluster --pod-pids-limit=5000 - `, - Run: run, - Args: cobra.NoArgs, -} + ` +) -var args struct { - podPidsLimit int -} +func NewCreateKubeletConfigCommand() *cobra.Command { + + options := NewKubeletConfigOptions() + cmd := &cobra.Command{ + Use: use, + Aliases: []string{"kubelet-config"}, + Short: short, + Long: long, + Example: example, + Run: rosa.DefaultRunner(rosa.RuntimeWithOCM(), CreateKubeletConfigRunner(options)), + Args: cobra.NoArgs, + } -func init() { - flags := Cmd.Flags() - flags.SortFlags = false - flags.IntVar( - &args.podPidsLimit, - PodPidsLimitOption, - PodPidsLimitOptionDefaultValue, - PodPidsLimitOptionUsage) - - ocm.AddClusterFlag(Cmd) - interactive.AddFlag(flags) + options.AddFlagsToCommand(cmd) + ocm.AddClusterFlag(cmd) + interactive.AddFlag(cmd.Flags()) + return cmd } -func run(_ *cobra.Command, _ []string) { - r := rosa.NewRuntime().WithOCM() - defer r.Cleanup() - - clusterKey := r.GetClusterKey() - cluster := r.FetchCluster() - - if cluster.Hypershift().Enabled() { - r.Reporter.Errorf("Hosted Control Plane clusters do not support custom KubeletConfig configuration.") - os.Exit(1) - } +func CreateKubeletConfigRunner(options *KubeletConfigOptions) rosa.CommandRunner { + return func(ctx context.Context, r *rosa.Runtime, command *cobra.Command, args 
[]string) error { + clusterKey := r.GetClusterKey() + cluster, err := r.OCMClient.GetCluster(r.GetClusterKey(), r.Creator) + if err != nil { + return err + } - if cluster.State() != cmv1.ClusterStateReady { - r.Reporter.Errorf("Cluster '%s' is not yet ready. Current state is '%s'", clusterKey, cluster.State()) - os.Exit(1) - } + if cluster.State() != cmv1.ClusterStateReady { + return fmt.Errorf("Cluster '%s' is not yet ready. Current state is '%s'", clusterKey, cluster.State()) + } - kubeletConfig, err := r.OCMClient.GetClusterKubeletConfig(cluster.ID()) - if err != nil { - r.Reporter.Errorf("Failed getting KubeletConfig for cluster '%s': %s", - cluster.ID(), err) - os.Exit(1) - } + if !cluster.Hypershift().Enabled() { + // Classic clusters can only have a single KubeletConfig + kubeletConfig, err := r.OCMClient.GetClusterKubeletConfig(cluster.ID()) + if err != nil { + return fmt.Errorf("Failed getting KubeletConfig for cluster '%s': %s", + r.ClusterKey, err) + } + + if kubeletConfig != nil { + return fmt.Errorf("A KubeletConfig for cluster '%s' already exists. "+ + "You should edit it via 'rosa edit kubeletconfig'", clusterKey) + } + } - if kubeletConfig != nil { - r.Reporter.Errorf("A custom KubeletConfig for cluster '%s' already exists. "+ - "You should edit it via 'rosa edit kubeletconfig'", clusterKey) - os.Exit(1) - } + name, err := ValidateOrPromptForName(options.Name) + if err != nil { + return err + } - requestedPids, err := ValidateOrPromptForRequestedPidsLimit(args.podPidsLimit, clusterKey, nil, r) - if err != nil { - os.Exit(1) - } + requestedPids, err := ValidateOrPromptForRequestedPidsLimit(options.PodPidsLimit, clusterKey, nil, r) + if err != nil { + return err + } - prompt := fmt.Sprintf("Creating the custom KubeletConfig for cluster '%s' will cause all non-Control Plane "+ - "nodes to reboot. This may cause outages to your applications. 
Do you wish to continue?", clusterKey) + if !cluster.Hypershift().Enabled() { + // Creating a KubeletConfig for a classic cluster must prompt the user, as the changes apply + // immediately and cause reboots of the worker nodes in their cluster + prompt := fmt.Sprintf("Creating a KubeletConfig for cluster '%s' will cause all non-Control Plane "+ + "nodes to reboot. This may cause outages to your applications. Do you wish to continue?", clusterKey) - if confirm.ConfirmRaw(prompt) { + if !confirm.ConfirmRaw(prompt) { + r.Reporter.Infof("Creation of KubeletConfig for cluster '%s' aborted.", clusterKey) + return nil + } + } r.Reporter.Debugf("Creating KubeletConfig for cluster '%s'", clusterKey) - kubeletConfigArgs := ocm.KubeletConfigArgs{PodPidsLimit: requestedPids} + kubeletConfigArgs := ocm.KubeletConfigArgs{PodPidsLimit: requestedPids, Name: name} _, err = r.OCMClient.CreateKubeletConfig(cluster.ID(), kubeletConfigArgs) if err != nil { - r.Reporter.Errorf("Failed creating custom KubeletConfig for cluster '%s': '%s'", + return fmt.Errorf("Failed creating KubeletConfig for cluster '%s': '%s'", clusterKey, err) - os.Exit(1) } + r.Reporter.Infof("Successfully created KubeletConfig for cluster '%s'", clusterKey) - r.Reporter.Infof("Successfully created custom KubeletConfig for cluster '%s'", clusterKey) - os.Exit(0) + return nil } - - r.Reporter.Infof("Creation of custom KubeletConfig for cluster '%s' aborted.", clusterKey) } diff --git a/cmd/create/kubeletconfig/cmd_test.go b/cmd/create/kubeletconfig/cmd_test.go new file mode 100644 index 0000000000..24e1070175 --- /dev/null +++ b/cmd/create/kubeletconfig/cmd_test.go @@ -0,0 +1,187 @@ +package kubeletconfig + +import ( + "context" + "net/http" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" + "github.com/openshift-online/ocm-sdk-go/testing" + + . "github.com/openshift/rosa/pkg/kubeletconfig" + "github.com/openshift/rosa/pkg/output" + . 
"github.com/openshift/rosa/pkg/test" +) + +var _ = Describe("create kubeletconfig", func() { + + It("Correctly builds the command", func() { + cmd := NewCreateKubeletConfigCommand() + Expect(cmd).NotTo(BeNil()) + + Expect(cmd.Use).To(Equal(use)) + Expect(cmd.Short).To(Equal(short)) + Expect(cmd.Long).To(Equal(long)) + Expect(cmd.Args).NotTo(BeNil()) + Expect(cmd.Run).NotTo(BeNil()) + + Expect(cmd.Flags().Lookup("cluster")).NotTo(BeNil()) + Expect(cmd.Flags().Lookup("interactive")).NotTo(BeNil()) + Expect(cmd.Flags().Lookup(PodPidsLimitOption)).NotTo(BeNil()) + Expect(cmd.Flags().Lookup(NameOption)).NotTo(BeNil()) + }) + + Context("CreateKubeletConfig Runner", func() { + + var t *TestingRuntime + + BeforeEach(func() { + t = NewTestRuntime() + output.SetOutput("") + }) + + AfterEach(func() { + output.SetOutput("") + }) + + It("Returns an error if the cluster does not exist", func() { + t.ApiServer.AppendHandlers(testing.RespondWithJSON(http.StatusOK, FormatClusterList(make([]*cmv1.Cluster, 0)))) + t.SetCluster("cluster", nil) + + runner := CreateKubeletConfigRunner(NewKubeletConfigOptions()) + err := runner(context.Background(), t.RosaRuntime, nil, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + Equal("There is no cluster with identifier or name 'cluster'")) + }) + + It("Returns an error if the cluster is not ready", func() { + + cluster := MockCluster(func(c *cmv1.ClusterBuilder) { + c.State(cmv1.ClusterStateInstalling) + }) + + t.ApiServer.AppendHandlers( + testing.RespondWithJSON( + http.StatusOK, FormatClusterList([]*cmv1.Cluster{cluster}))) + t.SetCluster("cluster", nil) + + runner := CreateKubeletConfigRunner(NewKubeletConfigOptions()) + err := runner(context.Background(), t.RosaRuntime, nil, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + Equal("Cluster 'cluster' is not yet ready. 
Current state is 'installing'")) + + }) + + It("Returns an error if a kubeletconfig already exists for classic cluster", func() { + cluster := MockCluster(func(c *cmv1.ClusterBuilder) { + c.State(cmv1.ClusterStateReady) + }) + + config := MockKubeletConfig(func(k *cmv1.KubeletConfigBuilder) { + k.Name("test").PodPidsLimit(10000).ID("foo") + }) + + t.ApiServer.AppendHandlers( + testing.RespondWithJSON( + http.StatusOK, FormatClusterList([]*cmv1.Cluster{cluster}))) + t.ApiServer.AppendHandlers( + testing.RespondWithJSON(http.StatusOK, FormatResource(config))) + t.SetCluster("cluster", nil) + + runner := CreateKubeletConfigRunner(NewKubeletConfigOptions()) + err := runner(context.Background(), t.RosaRuntime, nil, nil) + + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + Equal("A KubeletConfig for cluster 'cluster' already exists." + + " You should edit it via 'rosa edit kubeletconfig'")) + }) + + It("Returns an error if it fails to read the kubeletconfig from OCM for classic cluster", func() { + + cluster := MockCluster(func(c *cmv1.ClusterBuilder) { + c.State(cmv1.ClusterStateReady) + }) + + t.ApiServer.AppendHandlers( + testing.RespondWithJSON( + http.StatusOK, FormatClusterList([]*cmv1.Cluster{cluster}))) + t.ApiServer.AppendHandlers( + testing.RespondWithJSON(http.StatusInternalServerError, "{}")) + t.SetCluster("cluster", nil) + + runner := CreateKubeletConfigRunner(NewKubeletConfigOptions()) + err := runner(context.Background(), t.RosaRuntime, nil, nil) + + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + ContainSubstring("Failed getting KubeletConfig for cluster 'cluster'")) + }) + + It("Creates the KubeletConfig for HCP clusters", func() { + cluster := MockCluster(func(c *cmv1.ClusterBuilder) { + c.State(cmv1.ClusterStateReady) + b := cmv1.HypershiftBuilder{} + b.Enabled(true) + c.Hypershift(&b) + + }) + + kubeletConfig := MockKubeletConfig(func(k *cmv1.KubeletConfigBuilder) { + k.ID("test-id").PodPidsLimit(10000).Name("testing") + }) + + 
t.ApiServer.AppendHandlers( + testing.RespondWithJSON( + http.StatusOK, FormatClusterList([]*cmv1.Cluster{cluster}))) + t.ApiServer.AppendHandlers( + testing.RespondWithJSON(http.StatusCreated, FormatResource(kubeletConfig))) + t.SetCluster("cluster", nil) + + options := NewKubeletConfigOptions() + options.PodPidsLimit = 10000 + + runner := CreateKubeletConfigRunner(options) + t.StdOutReader.Record() + + err := runner(context.Background(), t.RosaRuntime, nil, nil) + Expect(err).NotTo(HaveOccurred()) + + stdOut, _ := t.StdOutReader.Read() + Expect(stdOut).To(Equal("INFO: Successfully created KubeletConfig for cluster 'cluster'\n")) + }) + + It("Returns an error if failing to create the KubeletConfig for HCP Clusters", func() { + cluster := MockCluster(func(c *cmv1.ClusterBuilder) { + c.State(cmv1.ClusterStateReady) + b := cmv1.HypershiftBuilder{} + b.Enabled(true) + c.Hypershift(&b) + + }) + + kubeletConfig := MockKubeletConfig(func(k *cmv1.KubeletConfigBuilder) { + k.ID("test-id").PodPidsLimit(10000).Name("testing") + }) + + t.ApiServer.AppendHandlers( + testing.RespondWithJSON( + http.StatusOK, FormatClusterList([]*cmv1.Cluster{cluster}))) + t.ApiServer.AppendHandlers( + testing.RespondWithJSON(http.StatusBadRequest, FormatResource(kubeletConfig))) + t.SetCluster("cluster", nil) + + options := NewKubeletConfigOptions() + options.PodPidsLimit = 10000 + + runner := CreateKubeletConfigRunner(options) + + err := runner(context.Background(), t.RosaRuntime, nil, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("Failed creating KubeletConfig for cluster 'cluster':")) + }) + }) +}) diff --git a/cmd/create/kubeletconfig/kubeletconfig_suite_test.go b/cmd/create/kubeletconfig/kubeletconfig_suite_test.go new file mode 100644 index 0000000000..049160410e --- /dev/null +++ b/cmd/create/kubeletconfig/kubeletconfig_suite_test.go @@ -0,0 +1,13 @@ +package kubeletconfig + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestCreateKubeletConfig(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Create KubeletConfig Suite") +} diff --git a/pkg/kubeletconfig/config.go b/pkg/kubeletconfig/config.go index 1f4271e0e4..8aafb697c2 100644 --- a/pkg/kubeletconfig/config.go +++ b/pkg/kubeletconfig/config.go @@ -53,6 +53,21 @@ func GetInteractiveInput(maxPidsLimit int, kubeletConfig *v1.KubeletConfig) inte } } +func ValidateOrPromptForName(requestedName string) (string, error) { + + if interactive.Enabled() { + return interactive.GetString(interactive.Input{ + Question: InteractiveNameHelpPrompt, + Help: InteractiveNameHelp, + Options: nil, + Default: requestedName, + Required: false, + }) + } + + return requestedName, nil +} + // ValidateOrPromptForRequestedPidsLimit validates user provided limits or prompts via interactive mode // if the user hasn't specified any limit on the command line. func ValidateOrPromptForRequestedPidsLimit( @@ -66,10 +81,16 @@ func ValidateOrPromptForRequestedPidsLimit( r.Reporter.Infof("Enabling interactive mode") } + // If the user has provided a fully supported podPidsLimit, then no need to check the org capabilities. + if requestedPids >= MinPodPidsLimit && requestedPids <= MaxPodPidsLimit { + return requestedPids, nil + } + + // The user is trying to exceed the default ranges. 
This requires a capability check at the org-level maxPidsLimit, err := GetMaxPidsLimit(r.OCMClient) if err != nil { return PodPidsLimitOptionDefaultValue, - r.Reporter.Errorf("Failed to check maximum allowed Pids limit for cluster '%s'", + fmt.Errorf("Failed to check maximum allowed Pids limit for cluster '%s'", clusterKey) } @@ -78,20 +99,20 @@ func ValidateOrPromptForRequestedPidsLimit( if err != nil { return PodPidsLimitOptionDefaultValue, - r.Reporter.Errorf("Failed reading requested Pids limit for cluster '%s': '%s'", + fmt.Errorf("Failed reading requested Pids limit for cluster '%s': '%s'", clusterKey, err) } } if requestedPids < MinPodPidsLimit { return PodPidsLimitOptionDefaultValue, - r.Reporter.Errorf("The minimum value for --pod-pids-limit is '%d'. You have supplied '%d'", + fmt.Errorf("The minimum value for --pod-pids-limit is '%d'. You have supplied '%d'", MinPodPidsLimit, requestedPids) } if requestedPids > maxPidsLimit { return PodPidsLimitOptionDefaultValue, - r.Reporter.Errorf("The maximum value for --pod-pids-limit is '%d'. You have supplied '%d'", + fmt.Errorf("The maximum value for --pod-pids-limit is '%d'. You have supplied '%d'", maxPidsLimit, requestedPids) } diff --git a/pkg/kubeletconfig/consts.go b/pkg/kubeletconfig/consts.go index 2048a7b8f7..97f4a47133 100644 --- a/pkg/kubeletconfig/consts.go +++ b/pkg/kubeletconfig/consts.go @@ -4,10 +4,15 @@ const ( MinPodPidsLimit = 4096 MaxPodPidsLimit = 16384 MaxUnsafePodPidsLimit = 3694303 + NameOption = "name" + NameOptionDefaultValue = "" + NameOptionUsage = "Sets the name for this KubeletConfig (optional, generated if omitted)" PodPidsLimitOption = "pod-pids-limit" - PodPidsLimitOptionUsage = "Sets the requested pod_pids_limit for your custom KubeletConfig." + PodPidsLimitOptionUsage = "Sets the requested pod_pids_limit for this KubeletConfig." PodPidsLimitOptionDefaultValue = 0 InteractivePodPidsLimitPrompt = "Pod Pids Limit?" 
InteractivePodPidsLimitHelp = "Set the Pod Pids Limit field to a value between 4096 and %d" + InteractiveNameHelpPrompt = "Name?" + InteractiveNameHelp = "Set the name of this KubeletConfig (optional)" ByPassPidsLimitCapability = "capability.organization.bypass_pids_limits" ) diff --git a/pkg/kubeletconfig/options.go b/pkg/kubeletconfig/options.go new file mode 100644 index 0000000000..ae10b4dccf --- /dev/null +++ b/pkg/kubeletconfig/options.go @@ -0,0 +1,27 @@ +package kubeletconfig + +import "github.com/spf13/cobra" + +type KubeletConfigOptions struct { + Name string + PodPidsLimit int +} + +func NewKubeletConfigOptions() *KubeletConfigOptions { + return &KubeletConfigOptions{} +} + +func (k *KubeletConfigOptions) AddFlagsToCommand(cmd *cobra.Command) { + flags := cmd.Flags() + flags.SortFlags = false + flags.IntVar( + &k.PodPidsLimit, + PodPidsLimitOption, + PodPidsLimitOptionDefaultValue, + PodPidsLimitOptionUsage) + flags.StringVar( + &k.Name, + NameOption, + NameOptionDefaultValue, + NameOptionUsage) +} diff --git a/pkg/kubeletconfig/options_test.go b/pkg/kubeletconfig/options_test.go new file mode 100644 index 0000000000..3b2fea12b8 --- /dev/null +++ b/pkg/kubeletconfig/options_test.go @@ -0,0 +1,34 @@ +package kubeletconfig + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/spf13/cobra" + flag "github.com/spf13/pflag" +) + +var _ = Describe("KubeletConfigOptions", func() { + + It("Adds flags to command", func() { + cmd := &cobra.Command{} + flags := cmd.Flags() + Expect(flags).NotTo(BeNil()) + Expect(flags.Lookup(PodPidsLimitOption)).To(BeNil()) + Expect(flags.Lookup(NameOption)).To(BeNil()) + + options := NewKubeletConfigOptions() + options.AddFlagsToCommand(cmd) + + flag := flags.Lookup(PodPidsLimitOption) + assertFlag(flag, PodPidsLimitOption, PodPidsLimitOptionUsage) + + flag = flags.Lookup(NameOption) + assertFlag(flag, NameOption, NameOptionUsage) + }) +}) + +func assertFlag(flag *flag.Flag, name string, usage string) { + Expect(flag).NotTo(BeNil()) + Expect(flag.Name).To(Equal(name)) + Expect(flag.Usage).To(Equal(usage)) +} diff --git a/pkg/ocm/kubeletconfig.go b/pkg/ocm/kubeletconfig.go index 88c7a03542..9a1b1dbc57 100644 --- a/pkg/ocm/kubeletconfig.go +++ b/pkg/ocm/kubeletconfig.go @@ -9,6 +9,7 @@ import ( type KubeletConfigArgs struct { PodPidsLimit int + Name string } func (c *Client) GetClusterKubeletConfig(clusterID string) (*cmv1.KubeletConfig, error) { @@ -35,7 +36,7 @@ func (c *Client) DeleteKubeletConfig(clusterID string) error { func toOCMKubeletConfig(args KubeletConfigArgs) (*cmv1.KubeletConfig, error) { builder := &cmv1.KubeletConfigBuilder{} - kubeletConfig, err := builder.PodPidsLimit(args.PodPidsLimit).Build() + kubeletConfig, err := builder.PodPidsLimit(args.PodPidsLimit).Name(args.Name).Build() if err != nil { return nil, err } diff --git a/pkg/ocm/kubeletconfig_test.go b/pkg/ocm/kubeletconfig_test.go index 45862636d3..075383a3f4 100644 --- a/pkg/ocm/kubeletconfig_test.go +++ b/pkg/ocm/kubeletconfig_test.go @@ -131,8 +131,9 @@ var _ = Describe("KubeletConfig", Ordered, func() { ), ) - args := KubeletConfigArgs{podPidsLimit} + args := KubeletConfigArgs{podPidsLimit, kubeletName} kubeletConfig, err := ocmClient.CreateKubeletConfig(clusterId, args) + 
Expect(kubeletConfig.Name()).To(Equal(kubeletName)) Expect(kubeletConfig).NotTo(BeNil()) Expect(err).NotTo(HaveOccurred()) @@ -147,7 +148,7 @@ var _ = Describe("KubeletConfig", Ordered, func() { ), ) - args := KubeletConfigArgs{podPidsLimit} + args := KubeletConfigArgs{podPidsLimit, kubeletName} _, err := ocmClient.CreateKubeletConfig(clusterId, args) Expect(err).To(HaveOccurred()) }) @@ -160,7 +161,7 @@ var _ = Describe("KubeletConfig", Ordered, func() { ), ) - args := KubeletConfigArgs{podPidsLimit} + args := KubeletConfigArgs{podPidsLimit, kubeletName} kubeletConfig, err := ocmClient.UpdateKubeletConfig(clusterId, args) Expect(kubeletConfig).NotTo(BeNil()) @@ -175,7 +176,7 @@ var _ = Describe("KubeletConfig", Ordered, func() { ), ) - args := KubeletConfigArgs{podPidsLimit} + args := KubeletConfigArgs{podPidsLimit, kubeletName} _, err := ocmClient.UpdateKubeletConfig(clusterId, args) Expect(err).To(HaveOccurred()) }) diff --git a/pkg/test/helpers.go b/pkg/test/helpers.go index 9063c615b0..e85cb782c4 100644 --- a/pkg/test/helpers.go +++ b/pkg/test/helpers.go @@ -270,6 +270,10 @@ func FormatResource(resource interface{}) string { var outputJson bytes.Buffer var err error switch reflect.TypeOf(resource).String() { + case "*v1.KubeletConfig": + if res, ok := resource.(*v1.KubeletConfig); ok { + err = v1.MarshalKubeletConfig(res, &outputJson) + } case "*v1.Version": if res, ok := resource.(*v1.Version); ok { err = v1.MarshalVersion(res, &outputJson)