OCM-7257 | feat: Refactor list machine pool cmd to use new default runner
hunterkepley committed May 9, 2024
1 parent bb3f601 commit e151f74
Showing 5 changed files with 258 additions and 33 deletions.
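
In summary, the commit converts `rosa list machinepools` from a package-level cobra command with its own `run` function into a constructor (`NewListMachinePoolCommand`) paired with a `ListMachinePoolRunner` that returns an error instead of calling `os.Exit`, wires the constructor into `cmd/list/cmd.go`, applies matching error-message casing in the describe command, and adds a Ginkgo test suite for the refactored command.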
4 changes: 2 additions & 2 deletions cmd/describe/machinepool/cmd.go
@@ -73,7 +73,7 @@ func DescribeMachinePoolRunner(userOptions DescribeMachinepoolUserOptions) rosa.
 		err := cmd.ParseFlags(argv)
 		userOptions.machinepool = cmd.Flag("machinepool").Value.String()
 		if err != nil {
-			return fmt.Errorf("unable to parse flags: %s", err)
+			return fmt.Errorf("Unable to parse flags: %s", err)
 		}
 	}
 	err := options.Bind(userOptions)
@@ -83,7 +83,7 @@ func DescribeMachinePoolRunner(userOptions DescribeMachinepoolUserOptions) rosa.
 	clusterKey := runtime.GetClusterKey()
 	cluster := runtime.FetchCluster()
 	if cluster.State() != cmv1.ClusterStateReady {
-		return fmt.Errorf("cluster '%s' is not yet ready", clusterKey)
+		return fmt.Errorf("Cluster '%s' is not yet ready", clusterKey)
 	}

 	service := machinepool.NewMachinePoolService()
5 changes: 3 additions & 2 deletions cmd/list/cmd.go
@@ -58,7 +58,8 @@ func init() {
 	Cmd.AddCommand(gates.Cmd)
 	Cmd.AddCommand(idp.Cmd)
 	Cmd.AddCommand(ingress.Cmd)
-	Cmd.AddCommand(machinepool.Cmd)
+	machinePoolCommand := machinepool.NewListMachinePoolCommand()
+	Cmd.AddCommand(machinePoolCommand)
 	Cmd.AddCommand(region.Cmd)
 	Cmd.AddCommand(upgrade.Cmd)
 	Cmd.AddCommand(user.Cmd)
@@ -86,7 +87,7 @@ func init() {
 		oidcprovider.Cmd, cluster.Cmd,
 		breakglasscredential.Cmd, addon.Cmd,
 		externalauthprovider.Cmd, dnsdomains.Cmd,
-		gates.Cmd, idp.Cmd, ingress.Cmd, machinepool.Cmd,
+		gates.Cmd, idp.Cmd, ingress.Cmd, machinePoolCommand,
 		operatorroles.Cmd, region.Cmd, rhRegion.Cmd,
 		service.Cmd, tuningconfigs.Cmd, upgrade.Cmd,
 		user.Cmd, version.Cmd,
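
The wiring change above is the whole consumer-side cost of the refactor: the list root command builds the machine pool command once and reuses that instance both for registration and for its command list. As a minimal sketch of why a constructor is preferable to a package-level `Cmd` variable — all names here (`newListWidgetsCommand`, the `widgets` command) are illustrative, not from the rosa codebase — each call returns a fresh *cobra.Command with its own flag set, so tests can build, flag, and execute commands in isolation instead of sharing mutable global state:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// newListWidgetsCommand is a hypothetical stand-in for constructors like
// machinepool.NewListMachinePoolCommand: every call returns a fresh command
// with its own flag set, so nothing leaks between tests or invocations.
func newListWidgetsCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "widgets",
		Short: "List widgets",
		Run: func(cmd *cobra.Command, _ []string) {
			clusterKey, _ := cmd.Flags().GetString("cluster")
			fmt.Printf("listing widgets for cluster %q\n", clusterKey)
		},
	}
	cmd.Flags().String("cluster", "", "Name or ID of the cluster")
	return cmd
}

func main() {
	root := &cobra.Command{Use: "rosa-like"}
	widgets := newListWidgetsCommand() // mirrors machinePoolCommand := ...
	root.AddCommand(widgets)           // mirrors Cmd.AddCommand(machinePoolCommand)
	root.SetArgs([]string{"widgets", "--cluster", "mycluster"})
	_ = root.Execute()
}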
70 changes: 41 additions & 29 deletions cmd/list/machinepool/cmd.go
@@ -17,7 +17,8 @@ limitations under the License.
 package machinepool

 import (
-	"os"
+	"context"
+	"fmt"

 	cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
 	"github.com/spf13/cobra"
@@ -28,38 +29,49 @@ import (
 	"github.com/openshift/rosa/pkg/rosa"
 )

-var Cmd = &cobra.Command{
-	Use:     "machinepools",
-	Aliases: []string{"machinepool", "machine-pools", "machine-pool"},
-	Short:   "List cluster machine pools",
-	Long:    "List machine pools configured on a cluster.",
-	Example: `  # List all machine pools on a cluster named "mycluster"
-  rosa list machinepools --cluster=mycluster`,
-	Run:  run,
-	Args: cobra.NoArgs,
-}
+const (
+	use     = "machinepools"
+	short   = "List cluster machine pools"
+	long    = "List machine pools configured on a cluster."
+	example = `  # List all machine pools on a cluster named "mycluster"
+  rosa list machinepools --cluster=mycluster`
+)

-func init() {
-	ocm.AddClusterFlag(Cmd)
-	output.AddFlag(Cmd)
-}
+var (
+	aliases = []string{"machinepool", "machine-pools", "machine-pool"}
+)

-func run(_ *cobra.Command, _ []string) {
-	r := rosa.NewRuntime().WithAWS().WithOCM()
-	defer r.Cleanup()
+func NewListMachinePoolCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:     use,
+		Short:   short,
+		Long:    long,
+		Aliases: aliases,
+		Example: example,
+		Args:    cobra.NoArgs,
+		Run:     rosa.DefaultRunner(rosa.RuntimeWithOCM(), ListMachinePoolRunner()),
+	}
+
+	output.AddFlag(cmd)
+	ocm.AddClusterFlag(cmd)
+	return cmd
+}

-	clusterKey := r.GetClusterKey()
+func ListMachinePoolRunner() rosa.CommandRunner {
+	return func(_ context.Context, runtime *rosa.Runtime, cmd *cobra.Command, _ []string) error {
+		clusterKey := runtime.GetClusterKey()

-	cluster := r.FetchCluster()
-	if cluster.State() != cmv1.ClusterStateReady &&
-		cluster.State() != cmv1.ClusterStateHibernating {
-		r.Reporter.Errorf("Cluster '%s' is not yet ready", clusterKey)
-		os.Exit(1)
-	}
+		cluster := runtime.FetchCluster()
+		if cluster.State() != cmv1.ClusterStateReady &&
+			cluster.State() != cmv1.ClusterStateHibernating {
+			return fmt.Errorf("Cluster '%s' is not yet ready", clusterKey)
+		}

-	service := machinepool.NewMachinePoolService()
-	err := service.ListMachinePools(r, clusterKey, cluster)
-	if err != nil {
-		r.Reporter.Errorf("Failed to list machinepools: %s", err)
+		service := machinepool.NewMachinePoolService()
+		err := service.ListMachinePools(runtime, clusterKey, cluster)
+		if err != nil {
+			return fmt.Errorf("Failed to list machinepools: %s", err)
+		}
+		return nil
+	}
+}
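
The diff shows only the call sites of the new runner plumbing; `rosa.CommandRunner`, `rosa.DefaultRunner`, and `rosa.RuntimeWithOCM` live in pkg/rosa and are not part of this commit. A rough sketch of what such a wrapper plausibly does, inferred purely from the usage above (all types and bodies here are assumptions, not the real pkg/rosa code): it adapts an error-returning runner to cobra's `Run` signature, centralizing the runtime construction, cleanup, and exit-on-error that each command previously hand-rolled:

package rosasketch

import (
	"context"
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// Runtime is a simplified stand-in for *rosa.Runtime (clients, reporter, ...).
type Runtime struct{}

func (r *Runtime) Cleanup() {}

// CommandRunner mirrors the signature ListMachinePoolRunner satisfies above.
type CommandRunner func(ctx context.Context, runtime *Runtime, cmd *cobra.Command, args []string) error

// RuntimeWithOCM stands in for rosa.RuntimeWithOCM: a factory that would
// build a runtime with an OCM connection attached.
func RuntimeWithOCM() func() *Runtime {
	return func() *Runtime { return &Runtime{} }
}

// DefaultRunner (assumed behavior) adapts a CommandRunner to cobra's Run
// field: build the runtime, clean it up, and turn a returned error into the
// non-zero exit that run() used to trigger with os.Exit(1). Cleanup runs
// before os.Exit because deferred calls do not execute across os.Exit.
func DefaultRunner(newRuntime func() *Runtime, runner CommandRunner) func(*cobra.Command, []string) {
	return func(cmd *cobra.Command, args []string) {
		runtime := newRuntime()
		err := runner(cmd.Context(), runtime, cmd, args)
		runtime.Cleanup()
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
	}
}

Whatever the real implementation looks like, the payoff shows up in the new test file below: the runner can be invoked directly with a context, runtime, and command, and its returned error inspected, with no process exit to work around.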
199 changes: 199 additions & 0 deletions cmd/list/machinepool/cmd_test.go
@@ -0,0 +1,199 @@
package machinepool

import (
"context"
"fmt"
"net/http"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/format"
cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
. "github.com/openshift-online/ocm-sdk-go/testing"

. "github.com/openshift/rosa/pkg/output"
"github.com/openshift/rosa/pkg/test"
)

const (
nodePoolName = "nodepool85"
clusterId = "24vf9iitg3p6tlml88iml6j6mu095mh8"
singleNodePoolOutput = "ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TAINTS" +
" AVAILABILITY ZONE SUBNET VERSION AUTOREPAIR \nnodepool85 No /0" +
" m5.xlarge us-east-1a 4.12.24 No \n"
singleMachinePoolOutput = "ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TAINTS AVAILABILITY " +
"ZONES SUBNETS SPOT INSTANCES DISK SIZE SG IDs\nnodepool85 No 0 " +
" m5.xlarge us-east-1a, us-east-1b, us-east-1c " +
"Yes (max $5) default \n"
multipleMachinePoolOutput = "ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TAINTS " +
"AVAILABILITY ZONES SUBNETS SPOT INSTANCES DISK SIZE SG IDs\nnodepool85 No " +
" 0 m5.xlarge us-east-1a, us-east-1b, us-east-1c " +
" Yes (max $5) default \nnodepool852 No 0 m5.xlarge test=label " +
" us-east-1a, us-east-1b, us-east-1c Yes (max $5) default " +
"\nnodepool853 Yes 1-100 m5.xlarge test=label test=taint: " +
"us-east-1a, us-east-1b, us-east-1c Yes (max $5) default \n"
multipleNodePoolsOutput = "ID AUTOSCALING REPLICAS INSTANCE TYPE LABELS TAINTS" +
" AVAILABILITY ZONE SUBNET VERSION AUTOREPAIR \nnodepool85 No" +
" /0 m5.xlarge us-east-1a " +
" 4.12.24 No \nnodepool852 Yes " +
"/\n - Min replicas: 100\n - Max replicas: 1000 m5.xlarge test=label us-east-1a 4.12.24 No \n"
)

var _ = Describe("List machine pool", func() {
Context("List machine pool command", func() {
// Full diff for long string to help debugging
format.TruncatedDiff = false

mockClusterReady := test.MockCluster(func(c *cmv1.ClusterBuilder) {
c.AWS(cmv1.NewAWS().SubnetIDs("subnet-0b761d44d3d9a4663", "subnet-0f87f640e56934cbc"))
c.Region(cmv1.NewCloudRegion().ID("us-east-1"))
c.State(cmv1.ClusterStateReady)
c.Hypershift(cmv1.NewHypershift().Enabled(true))
})

hypershiftClusterReady := test.FormatClusterList([]*cmv1.Cluster{mockClusterReady})

mockClassicClusterReady := test.MockCluster(func(c *cmv1.ClusterBuilder) {
c.AWS(cmv1.NewAWS().SubnetIDs("subnet-0b761d44d3d9a4663", "subnet-0f87f640e56934cbc"))
c.Region(cmv1.NewCloudRegion().ID("us-east-1"))
c.State(cmv1.ClusterStateReady)
c.Hypershift(cmv1.NewHypershift().Enabled(false))
})
classicClusterReady := test.FormatClusterList([]*cmv1.Cluster{mockClassicClusterReady})

mpResponse := formatMachinePool()
multipleMpResponse := formatMachinePools()

nodePoolResponse := formatNodePool()
multipleNpResponse := formatNodePools()

var t *test.TestingRuntime

BeforeEach(func() {
t = test.NewTestRuntime()
SetOutput("")
})
Context("Hypershift", func() {
It("Lists nodepool in hypershift cluster", func() {
t.ApiServer.AppendHandlers(RespondWithJSON(http.StatusOK, hypershiftClusterReady))
t.ApiServer.AppendHandlers(RespondWithJSON(http.StatusOK, nodePoolResponse))
runner := ListMachinePoolRunner()
err := t.StdOutReader.Record()
Expect(err).ToNot(HaveOccurred())
cmd := NewListMachinePoolCommand()
err = cmd.Flag("cluster").Value.Set(clusterId)
Expect(err).ToNot(HaveOccurred())
err = runner(context.Background(), t.RosaRuntime, cmd, []string{})
Expect(err).ToNot(HaveOccurred())
stdout, err := t.StdOutReader.Read()
Expect(err).ToNot(HaveOccurred())
Expect(stdout).To(Equal(singleNodePoolOutput))
})
It("Lists multiple nodepools in hypershift cluster", func() {
t.ApiServer.AppendHandlers(RespondWithJSON(http.StatusOK, hypershiftClusterReady))
t.ApiServer.AppendHandlers(RespondWithJSON(http.StatusOK, multipleNpResponse))
runner := ListMachinePoolRunner()
err := t.StdOutReader.Record()
Expect(err).ToNot(HaveOccurred())
cmd := NewListMachinePoolCommand()
err = cmd.Flag("cluster").Value.Set(clusterId)
Expect(err).ToNot(HaveOccurred())
err = runner(context.Background(), t.RosaRuntime, cmd, []string{})
Expect(err).ToNot(HaveOccurred())
stdout, err := t.StdOutReader.Read()
Expect(err).ToNot(HaveOccurred())
Expect(stdout).To(Equal(multipleNodePoolsOutput))
})
})
Context("ROSA Classic", func() {
It("Lists machinepool in classic cluster", func() {
t.ApiServer.AppendHandlers(RespondWithJSON(http.StatusOK, classicClusterReady))
t.ApiServer.AppendHandlers(RespondWithJSON(http.StatusOK, mpResponse))
runner := ListMachinePoolRunner()
err := t.StdOutReader.Record()
Expect(err).ToNot(HaveOccurred())
cmd := NewListMachinePoolCommand()
err = cmd.Flag("cluster").Value.Set(clusterId)
Expect(err).ToNot(HaveOccurred())
err = runner(context.Background(), t.RosaRuntime, cmd, []string{})
Expect(err).ToNot(HaveOccurred())
stdout, err := t.StdOutReader.Read()
Expect(err).ToNot(HaveOccurred())
Expect(stdout).To(Equal(singleMachinePoolOutput))
})
It("Lists multiple machinepools in classic cluster", func() {
t.ApiServer.AppendHandlers(RespondWithJSON(http.StatusOK, classicClusterReady))
t.ApiServer.AppendHandlers(RespondWithJSON(http.StatusOK, multipleMpResponse))
runner := ListMachinePoolRunner()
err := t.StdOutReader.Record()
Expect(err).ToNot(HaveOccurred())
cmd := NewListMachinePoolCommand()
err = cmd.Flag("cluster").Value.Set(clusterId)
Expect(err).ToNot(HaveOccurred())
err = runner(context.Background(), t.RosaRuntime, cmd, []string{})
Expect(err).ToNot(HaveOccurred())
stdout, err := t.StdOutReader.Read()
Expect(err).ToNot(HaveOccurred())
Expect(stdout).To(Equal(multipleMachinePoolOutput))
})
})
})
})

// formatNodePool simulates the output of APIs for a fake node pool list
func formatNodePool() string {
version := cmv1.NewVersion().ID("4.12.24").RawID("openshift-4.12.24")
awsNodePool := cmv1.NewAWSNodePool().InstanceType("m5.xlarge")
nodeDrain := cmv1.NewValue().Value(1).Unit("minute")
nodePool, err := cmv1.NewNodePool().ID(nodePoolName).Version(version).
AWSNodePool(awsNodePool).AvailabilityZone("us-east-1a").NodeDrainGracePeriod(nodeDrain).Build()
Expect(err).ToNot(HaveOccurred())
return fmt.Sprintf("{\n \"items\": [\n %s\n ],\n \"page\": 0,\n \"size\": 1,\n \"total\": 1\n}",
test.FormatResource(nodePool))
}

// formatNodePools simulates the output of APIs for a fake node pool list (multiple)
func formatNodePools() string {
version := cmv1.NewVersion().ID("4.12.24").RawID("openshift-4.12.24")
awsNodePool := cmv1.NewAWSNodePool().InstanceType("m5.xlarge")
nodeDrain := cmv1.NewValue().Value(1).Unit("minute")
nodePoolBuilder := cmv1.NewNodePool().ID(nodePoolName).Version(version).
AWSNodePool(awsNodePool).AvailabilityZone("us-east-1a").NodeDrainGracePeriod(nodeDrain)
np1, err := nodePoolBuilder.Build()
Expect(err).ToNot(HaveOccurred())
nodePoolBuilder = nodePoolBuilder.ID(nodePoolName + "2").Labels(map[string]string{"test": "label"}).
Autoscaling(cmv1.NewNodePoolAutoscaling().ID("scaler").MinReplica(100).MaxReplica(1000))
np2, err := nodePoolBuilder.Build()
Expect(err).ToNot(HaveOccurred())
return fmt.Sprintf("{\n \"items\": [\n %s,\n%s\n ],\n \"page\": 0,\n \"size\": 1,\n \"total\": 1\n}",
test.FormatResource(np1), test.FormatResource(np2))
}

// formatMachinePool simulates the output of APIs for a fake machine pool list
func formatMachinePool() string {
awsMachinePoolPool := cmv1.NewAWSMachinePool().SpotMarketOptions(cmv1.NewAWSSpotMarketOptions().MaxPrice(5))
machinePool, err := cmv1.NewMachinePool().ID(nodePoolName).AWS(awsMachinePoolPool).InstanceType("m5.xlarge").
AvailabilityZones("us-east-1a", "us-east-1b", "us-east-1c").Build()
Expect(err).ToNot(HaveOccurred())
return fmt.Sprintf("{\n \"items\": [\n %s\n ],\n \"page\": 0,\n \"size\": 1,\n \"total\": 1\n}",
test.FormatResource(machinePool))
}

// formatMachinePools simulates the output of APIs for a fake machine pool list (multiple)
func formatMachinePools() string {
awsMachinePoolPool := cmv1.NewAWSMachinePool().SpotMarketOptions(cmv1.NewAWSSpotMarketOptions().MaxPrice(5))
machinePoolBuilder := cmv1.NewMachinePool().ID(nodePoolName).AWS(awsMachinePoolPool).InstanceType("m5.xlarge").
AvailabilityZones("us-east-1a", "us-east-1b", "us-east-1c")
mp1, err := machinePoolBuilder.Build()
Expect(err).ToNot(HaveOccurred())
machinePoolBuilder = machinePoolBuilder.ID(nodePoolName + "2").Labels(map[string]string{"test": "label"})
mp2, err := machinePoolBuilder.Build()
Expect(err).ToNot(HaveOccurred())
machinePoolBuilder = machinePoolBuilder.ID(nodePoolName + "3").Taints(cmv1.NewTaint().Key("test").
Value("taint")).Autoscaling(cmv1.NewMachinePoolAutoscaling().ID("scaler").MaxReplicas(100).
MinReplicas(1))
mp3, err := machinePoolBuilder.Build()
Expect(err).ToNot(HaveOccurred())
return fmt.Sprintf("{\n \"items\": [\n %s,\n%s,\n%s\n ],\n \"page\": 0,\n \"size\": 1,\n "+
"\"total\": 1\n}", test.FormatResource(mp1), test.FormatResource(mp2), test.FormatResource(mp3))
}
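
The tests above rely on two pkg/test helpers not shown in this commit: an `ApiServer` whose queued `RespondWithJSON` handlers answer the cluster fetch and the machine pool list in order, and a `StdOutReader` that captures what the runner prints. A guess at how such a stdout recorder might work — an illustrative sketch, not the actual pkg/test implementation — is to swap `os.Stdout` for the write end of a pipe while recording:

package testsketch

import (
	"io"
	"os"
)

// StdOutReader is a hypothetical recorder in the spirit of test.StdOutReader.
type StdOutReader struct {
	orig *os.File
	r, w *os.File
}

// Record swaps os.Stdout for the write end of a pipe.
func (s *StdOutReader) Record() error {
	r, w, err := os.Pipe()
	if err != nil {
		return err
	}
	s.orig, s.r, s.w = os.Stdout, r, w
	os.Stdout = w
	return nil
}

// Read restores os.Stdout and returns everything written while recording.
// Fine for small outputs like these tables; a real helper might drain the
// pipe concurrently to avoid blocking large writers.
func (s *StdOutReader) Read() (string, error) {
	os.Stdout = s.orig
	if err := s.w.Close(); err != nil {
		return "", err
	}
	out, err := io.ReadAll(s.r)
	return string(out), err
}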
13 changes: 13 additions & 0 deletions cmd/list/machinepool/main_test.go
@@ -0,0 +1,13 @@
package machinepool

import (
"testing"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

func TestListMachinePool(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "List machine pool suite")
}
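
Since `RegisterFailHandler(Fail)` and `RunSpecs` bridge Ginkgo into the standard library's testing package, the new suite runs under a plain `go test ./cmd/list/machinepool/...` alongside the rest of the repository's tests; no separate ginkgo CLI is required.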
