diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index d5d2c9cb4..971b74b7d 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -369,7 +369,7 @@ func (o *ClusterObjects) GetInstanceInfo() []*InstanceInfo {
 			Namespace:   pod.Namespace,
 			Cluster:     getLabelVal(pod.Labels, constant.AppInstanceLabelKey),
 			Component:   getLabelVal(pod.Labels, constant.KBAppComponentLabelKey),
-			Status:      string(pod.Status.Phase),
+			Status:      o.getPodPhase(&pod),
 			Role:        getLabelVal(pod.Labels, constant.RoleLabelKey),
 			AccessMode:  getLabelVal(pod.Labels, constant.ConsensusSetAccessModeLabelKey),
 			CreatedTime: util.TimeFormat(&pod.CreationTimestamp),
@@ -389,6 +389,92 @@ func (o *ClusterObjects) GetInstanceInfo() []*InstanceInfo {
 	return instances
 }
 
+// port from https://github.com/kubernetes/kubernetes/blob/master/pkg/printers/internalversion/printers.go#L860
+func (o *ClusterObjects) getPodPhase(pod *corev1.Pod) string {
+	reason := string(pod.Status.Phase)
+	if pod.Status.Reason != "" {
+		reason = pod.Status.Reason
+	}
+
+	// If the Pod carries {type:PodScheduled, reason:WaitingForGates}, set reason to 'SchedulingGated'.
+	for _, condition := range pod.Status.Conditions {
+		if condition.Type == corev1.PodScheduled && condition.Reason == corev1.PodReasonSchedulingGated {
+			reason = corev1.PodReasonSchedulingGated
+		}
+	}
+	hasPodReadyCondition := func(conditions []corev1.PodCondition) bool {
+		for _, condition := range conditions {
+			if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue {
+				return true
+			}
+		}
+		return false
+	}
+	initializing := false
+	for i := range pod.Status.InitContainerStatuses {
+		container := pod.Status.InitContainerStatuses[i]
+		switch {
+		case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
+			continue
+		case container.State.Terminated != nil:
+			// initialization failed
+			if len(container.State.Terminated.Reason) == 0 {
+				if container.State.Terminated.Signal != 0 {
+					reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
+				} else {
+					reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
+				}
+			} else {
+				reason = "Init:" + container.State.Terminated.Reason
+			}
+			initializing = true
+		case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
+			reason = "Init:" + container.State.Waiting.Reason
+			initializing = true
+		default:
+			reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
+			initializing = true
+		}
+		break
+	}
+	if !initializing {
+		hasRunning := false
+		for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
+			container := pod.Status.ContainerStatuses[i]
+			switch {
+			case container.State.Waiting != nil && container.State.Waiting.Reason != "":
+				reason = container.State.Waiting.Reason
+			case container.State.Terminated != nil && container.State.Terminated.Reason != "":
+				reason = container.State.Terminated.Reason
+			case container.State.Terminated != nil && container.State.Terminated.Reason == "":
+				if container.State.Terminated.Signal != 0 {
+					reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
+				} else {
+					reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
+				}
+			case container.Ready && container.State.Running != nil:
+				hasRunning = true
+			}
+		}
+
+		// change pod status back to "Running" if there is at least one container still reporting as "Running" status
+		if reason == "Completed" && hasRunning {
+			if hasPodReadyCondition(pod.Status.Conditions) {
+				reason = "Running"
+			} else {
+				reason = "NotReady"
+			}
+		}
+	}
+
+	if pod.DeletionTimestamp != nil && pod.Status.Reason == "NodeLost" {
+		reason = "Unknown"
+	} else if pod.DeletionTimestamp != nil {
+		reason = "Terminating"
+	}
+	return reason
+}
+
 func (o *ClusterObjects) getStorageInfo(component *appsv1alpha1.ClusterComponentSpec) []StorageInfo {
 	if component == nil {
 		return nil
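
A minimal usage sketch (not part of the diff) of the ported phase logic. It assumes `getPodPhase` only reads from the pod and ignores its `ClusterObjects` receiver, as the hunk above suggests; the pod fixtures and the example function name are hypothetical:

```go
package cluster

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePodPhases prints the display strings the ported logic derives for two
// common cases; both pod fixtures are hypothetical.
func examplePodPhases() {
	o := &ClusterObjects{} // receiver state is not consulted by getPodPhase

	// An init container that exited non-zero with no reason string is
	// reported as "Init:ExitCode:<code>".
	initFailed := &corev1.Pod{
		Status: corev1.PodStatus{
			Phase: corev1.PodPending,
			InitContainerStatuses: []corev1.ContainerStatus{{
				State: corev1.ContainerState{
					Terminated: &corev1.ContainerStateTerminated{ExitCode: 1},
				},
			}},
		},
	}
	fmt.Println(o.getPodPhase(initFailed)) // Init:ExitCode:1

	// A pod with a deletion timestamp (and no "NodeLost" reason) is reported
	// as "Terminating" regardless of its phase.
	now := metav1.Now()
	terminating := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &now},
		Status:     corev1.PodStatus{Phase: corev1.PodRunning},
	}
	fmt.Println(o.getPodPhase(terminating)) // Terminating
}
```

These are the same display values `kubectl get pods` would show for such pods, which is the point of replacing the raw `pod.Status.Phase` in the instance listing.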