Remove check for clients in status command
Thomas Eckert committed Oct 3, 2022
1 parent d3258f9 commit 9f26fb6
Showing 2 changed files with 2 additions and 125 deletions.
28 changes: 0 additions & 28 deletions cli/cmd/status/status.go
@@ -69,9 +69,7 @@ func (c *Command) Run(args []string) int {
c.helmActionsRunner = &helm.ActionRunner{}
}

// The logger is initialized in main with the name cli. Here, we reset the name to status so log lines would be prefixed with status.
c.Log.ResetNamed("status")

defer common.CloseWithError(c.BaseCommand)

if err := c.set.Parse(args); err != nil {
@@ -128,13 +126,6 @@ func (c *Command) Run(args []string) int {
c.UI.Output(s, terminal.WithSuccessStyle())
}

if s, err := c.checkConsulClients(namespace); err != nil {
c.UI.Output(err.Error(), terminal.WithErrorStyle())
return 1
} else {
c.UI.Output(s, terminal.WithSuccessStyle())
}

return 0
}

@@ -245,25 +236,6 @@ func (c *Command) checkConsulServers(namespace string) (string, error) {
return fmt.Sprintf("Consul servers healthy (%d/%d)", readyReplicas, desiredReplicas), nil
}

// checkConsulClients uses the Kubernetes list function to report if the consul clients are healthy.
func (c *Command) checkConsulClients(namespace string) (string, error) {
clients, err := c.kubernetes.AppsV1().DaemonSets(namespace).List(c.Ctx,
metav1.ListOptions{LabelSelector: "app=consul,chart=consul-helm"})
if err != nil {
return "", err
} else if len(clients.Items) == 0 {
return "", errors.New("no client daemon set found")
} else if len(clients.Items) > 1 {
return "", errors.New("found multiple client daemon sets")
}
desiredReplicas := int(clients.Items[0].Status.DesiredNumberScheduled)
readyReplicas := int(clients.Items[0].Status.NumberReady)
if readyReplicas < desiredReplicas {
return "", fmt.Errorf("%d/%d Consul clients unhealthy", desiredReplicas-readyReplicas, desiredReplicas)
}
return fmt.Sprintf("Consul clients healthy (%d/%d)", readyReplicas, desiredReplicas), nil
}

// setupKubeClient to use for non Helm SDK calls to the Kubernetes API The Helm SDK will use
// settings.RESTClientGetter for its calls as well, so this will use a consistent method to
// target the right cluster for both Helm SDK and non Helm SDK calls.
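For context, the StatefulSet-based server check that the status command keeps after this change follows the same pattern as the removed checkConsulClients above. The sketch below is a reconstruction for illustration only: the real method is defined on *Command (see the hunk header above), the label selector and the "no/multiple stateful set" error strings are assumptions modeled on the removed client check, and only the healthy/unhealthy messages are confirmed by the hunks and tests in this commit.

package status

import (
    "context"
    "errors"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// checkConsulServers sketches the server health check that remains after this
// commit: list the Consul server stateful sets and compare ready vs. desired
// replicas. The selector and the not-found/duplicate error strings below are
// assumptions modeled on the removed checkConsulClients.
func checkConsulServers(ctx context.Context, k8s kubernetes.Interface, namespace string) (string, error) {
    servers, err := k8s.AppsV1().StatefulSets(namespace).List(ctx,
        metav1.ListOptions{LabelSelector: "app=consul,chart=consul-helm"}) // selector assumed
    if err != nil {
        return "", err
    } else if len(servers.Items) == 0 {
        return "", errors.New("no server stateful set found")
    } else if len(servers.Items) > 1 {
        return "", errors.New("found multiple server stateful sets")
    }

    // Spec.Replicas is a *int32 on StatefulSets, so guard against nil.
    var desiredReplicas int
    if r := servers.Items[0].Spec.Replicas; r != nil {
        desiredReplicas = int(*r)
    }
    readyReplicas := int(servers.Items[0].Status.ReadyReplicas)
    if readyReplicas < desiredReplicas {
        return "", fmt.Errorf("%d/%d Consul servers unhealthy", desiredReplicas-readyReplicas, desiredReplicas)
    }
    return fmt.Sprintf("Consul servers healthy (%d/%d)", readyReplicas, desiredReplicas), nil
}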
99 changes: 2 additions & 97 deletions cli/cmd/status/status_test.go
@@ -63,77 +63,6 @@ func TestCheckConsulServers(t *testing.T) {
require.Contains(t, err.Error(), fmt.Sprintf("%d/%d Consul servers unhealthy", 1, replicas))
}

// TestCheckConsulClients is very similar to TestCheckConsulServers() in structure.
func TestCheckConsulClients(t *testing.T) {
c := getInitializedCommand(t, nil)
c.kubernetes = fake.NewSimpleClientset()

// No client daemon set should cause an error.
_, err := c.checkConsulClients("default")
require.Error(t, err)
require.Contains(t, err.Error(), "no client daemon set found")

// Next create a daemon set.
var desired int32 = 3

ds := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "consul-client-test1",
Namespace: "default",
Labels: map[string]string{"app": "consul", "chart": "consul-helm"},
},
Status: appsv1.DaemonSetStatus{
DesiredNumberScheduled: desired,
NumberReady: desired,
},
}

c.kubernetes.AppsV1().DaemonSets("default").Create(context.Background(), ds, metav1.CreateOptions{})

// Now run checkConsulClients() and make sure it succeeds.
s, err := c.checkConsulClients("default")
require.NoError(t, err)
require.Equal(t, "Consul clients healthy (3/3)", s)

// Creating another daemon set should cause an error.
ds2 := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "consul-client-test2",
Namespace: "default",
Labels: map[string]string{"app": "consul", "chart": "consul-helm"},
},
Status: appsv1.DaemonSetStatus{
DesiredNumberScheduled: desired,
NumberReady: desired,
},
}
c.kubernetes.AppsV1().DaemonSets("default").Create(context.Background(), ds2, metav1.CreateOptions{})

_, err = c.checkConsulClients("default")
require.Error(t, err)
require.Contains(t, err.Error(), "found multiple client daemon sets")

// Clear out the client and run a test with fewer than desired daemon sets ready.
c.kubernetes = fake.NewSimpleClientset()

ds3 := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "consul-client-test2",
Namespace: "default",
Labels: map[string]string{"app": "consul", "chart": "consul-helm"},
},
Status: appsv1.DaemonSetStatus{
DesiredNumberScheduled: desired,
NumberReady: desired - 1,
},
}
c.kubernetes.AppsV1().DaemonSets("default").Create(context.Background(), ds3, metav1.CreateOptions{})

_, err = c.checkConsulClients("default")
require.Error(t, err)
require.Contains(t, err.Error(), fmt.Sprintf("%d/%d Consul clients unhealthy", 1, desired))
}

// TestStatus creates a fake stateful set and tests the checkConsulServers function.
func TestStatus(t *testing.T) {
nowTime := helmTime.Now()
@@ -150,7 +79,7 @@ func TestStatus(t *testing.T) {
input: []string{},
messages: []string{
fmt.Sprintf("\n==> Consul Status Summary\nName\tNamespace\tStatus\tChart Version\tAppVersion\tRevision\tLast Updated \n \t \tREADY \t1.0.0 \t \t0 \t%s\t\n", notImeStr),
"\n==> Config:\n {}\n \n ✓ Consul servers healthy (3/3)\n ✓ Consul clients healthy (3/3)\n",
"\n==> Config:\n {}\n \n ✓ Consul servers healthy (3/3)\n",
},
preProcessingFunc: func(k8s kubernetes.Interface) {
createDaemonset("consul-client-test1", "consul", 3, 3, k8s)
@@ -196,36 +125,12 @@ func TestStatus(t *testing.T) {
},
expectedReturnCode: 1,
},
"status with no clients returns error": {
input: []string{},
messages: []string{
fmt.Sprintf("\n==> Consul Status Summary\nName\tNamespace\tStatus\tChart Version\tAppVersion\tRevision\tLast Updated \n \t \tREADY \t1.0.0 \t \t0 \t%s\t\n", notImeStr),
"\n==> Config:\n {}\n \n ✓ Consul servers healthy (3/3)\n ! no client daemon set found\n",
},
preProcessingFunc: func(k8s kubernetes.Interface) {
createStatefulSet("consul-server-test1", "consul", 3, 3, k8s)
},
helmActionsRunner: &helm.MockActionRunner{
GetStatusFunc: func(status *action.Status, name string) (*helmRelease.Release, error) {
return &helmRelease.Release{
Name: "consul", Namespace: "consul",
Info: &helmRelease.Info{LastDeployed: nowTime, Status: "READY"},
Chart: &chart.Chart{
Metadata: &chart.Metadata{
Version: "1.0.0",
},
},
Config: make(map[string]interface{})}, nil
},
},
expectedReturnCode: 1,
},
"status with pre-install and pre-upgrade hooks returns success and outputs hook status": {
input: []string{},
messages: []string{
fmt.Sprintf("\n==> Consul Status Summary\nName\tNamespace\tStatus\tChart Version\tAppVersion\tRevision\tLast Updated \n \t \tREADY \t1.0.0 \t \t0 \t%s\t\n", notImeStr),
"\n==> Config:\n {}\n \n",
"\n==> Status Of Helm Hooks:\npre-install-hook pre-install: Succeeded\npre-upgrade-hook pre-upgrade: Succeeded\n ✓ Consul servers healthy (3/3)\n ✓ Consul clients healthy (3/3)\n",
"\n==> Status Of Helm Hooks:\npre-install-hook pre-install: Succeeded\npre-upgrade-hook pre-upgrade: Succeeded\n ✓ Consul servers healthy (3/3)\n",
},
preProcessingFunc: func(k8s kubernetes.Interface) {
createDaemonset("consul-client-test1", "consul", 3, 3, k8s)
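The surviving TestStatus cases build their fixtures with the createStatefulSet and createDaemonset helpers called in the preProcessingFunc blocks above. A minimal sketch of createStatefulSet follows; the parameter order (name, namespace, desired replicas, ready replicas, clientset) is inferred from the call sites, and the labels are assumed to match the selector used by the server health check.

package status

import (
    "context"

    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// createStatefulSet sketches the test fixture helper used by TestStatus. The
// signature and labels are inferred from the call sites and the server check;
// treat them as assumptions rather than the repository's exact helper.
func createStatefulSet(name, namespace string, replicas, readyReplicas int32, k8s kubernetes.Interface) {
    ss := &appsv1.StatefulSet{
        ObjectMeta: metav1.ObjectMeta{
            Name:      name,
            Namespace: namespace,
            Labels:    map[string]string{"app": "consul", "chart": "consul-helm"},
        },
        Spec: appsv1.StatefulSetSpec{
            Replicas: &replicas,
        },
        Status: appsv1.StatefulSetStatus{
            Replicas:      replicas,
            ReadyReplicas: readyReplicas,
        },
    }
    // Ignore the returned object and error, as the daemon set fixtures above do.
    k8s.AppsV1().StatefulSets(namespace).Create(context.Background(), ss, metav1.CreateOptions{})
}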
