Add recommended order of hosts removal (#960)

xmudrii authored Jul 8, 2020
1 parent d7293ba commit e0eeed1

Showing 2 changed files with 49 additions and 32 deletions.
12 changes: 10 additions & 2 deletions pkg/cmd/apply.go
@@ -176,8 +176,16 @@ func runApply(opts *applyOpts) error {
 		for _, node := range brokenHosts {
 			s.Logger.Errorf("Host %q is broken and needs to be manually removed\n", node)
 		}
-		s.Logger.Warnf("You can remove %d host(s) at the same or otherwise quorum will be lost!!!\n", s.LiveCluster.EtcdToleranceRemain())
-		s.Logger.Warnf("After removing host(s), run kubeone apply again\n")
+
+		s.Logger.Warnf("Hosts must be removed in a correct order to preserve the Etcd quorum.")
+		s.Logger.Warnf("Loss of the Etcd quorum can cause loss of all data!!!")
+		s.Logger.Warnf("After removing recommended hosts, run 'kubeone apply' before removing any other host.")
+		s.Logger.Warnf("The recommended removal order:")
+
+		safeToDelete := s.LiveCluster.SafeToDeleteHosts()
+		for _, safe := range safeToDelete {
+			s.Logger.Warnf("- %q", safe)
+		}
 	}
 	// TODO: Should we return at the beginning after install?
 	for _, node := range s.LiveCluster.ControlPlane {
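For reference, the tolerance figure these warnings rely on comes from the etcd quorum rule: a cluster of n healthy members keeps quorum as long as n/2 + 1 of them remain, so at most n - (n/2 + 1) members may be removed at once. A minimal standalone sketch of that arithmetic (the etcdTolerance helper is illustrative only, not kubeone's API; the real computation is in the cluster.go hunk below):

package main

import "fmt"

// etcdTolerance mirrors the quorum arithmetic used by EtcdToleranceRemain
// in the cluster.go hunk below: quorum = n/2 + 1, tolerance = n - quorum.
// Illustrative standalone helper, not kubeone's API.
func etcdTolerance(healthyEtcd int) int {
	quorum := healthyEtcd/2 + 1
	return healthyEtcd - quorum
}

func main() {
	// A 3-member etcd cluster keeps quorum with 2 members (1 removable);
	// a 5-member cluster keeps quorum with 3 (2 removable).
	for _, n := range []int{1, 3, 5} {
		fmt.Printf("healthy etcd members: %d -> removable at once: %d\n", n, etcdTolerance(n))
	}
}

This is also why odd-sized etcd clusters are recommended: going from 3 to 4 members raises the quorum without raising the tolerance.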
69 changes: 39 additions & 30 deletions pkg/state/cluster.go
@@ -83,6 +83,23 @@ func (c *Cluster) IsProvisioned() bool {
 	return false
 }
 
+// Healthy checks the cluster overall healthiness
+func (c *Cluster) Healthy() bool {
+	for i := range c.ControlPlane {
+		if !c.ControlPlane[i].ControlPlaneHealthy() {
+			return false
+		}
+	}
+
+	for i := range c.Workers {
+		if !c.Workers[i].WorkerHealthy() {
+			return false
+		}
+	}
+
+	return true
+}
+
 // BrokenHosts returns a list of broken hosts that needs to be removed manually
 func (c *Cluster) BrokenHosts() []string {
 	brokenNodes := []string{}
@@ -94,50 +111,42 @@ func (c *Cluster) BrokenHosts() []string {
 	return brokenNodes
 }
 
-// Healthy checks the cluster overall healthiness
-func (c *Cluster) Healthy() bool {
-	if !c.QuorumSatisfied() {
-		return false
-	}
+func (c *Cluster) SafeToDeleteHosts() []string {
+	safeToDelete := []string{}
+	deleteCandidate := []string{}
+	tolerance := c.EtcdToleranceRemain()
 
 	for i := range c.ControlPlane {
-		if !c.ControlPlane[i].ControlPlaneHealthy() {
-			return false
+		if !c.ControlPlane[i].IsInCluster {
+			continue
 		}
-	}
 
-	for i := range c.Workers {
-		if !c.Workers[i].WorkerHealthy() {
-			return false
+		if !c.ControlPlane[i].Etcd.Healthy() {
+			safeToDelete = append(safeToDelete, c.ControlPlane[i].Config.Hostname)
+		} else if !c.ControlPlane[i].APIServer.Healthy() {
+			deleteCandidate = append(deleteCandidate, c.ControlPlane[i].Config.Hostname)
 		}
 	}
+	tolerance -= len(safeToDelete)
+	if tolerance > 0 {
+		safeToDelete = append(safeToDelete, deleteCandidate[:tolerance]...)
+	}
 
-	return true
+	return safeToDelete
 }
 
 // EtcdToleranceRemain returns how many non-working nodes can be removed at the same time.
-// TODO: We should instruct user which node exactly to remove. For instance, if there are two broken nodes
-// one with broken API server and one with broken etcd, the node with broken etcd must be removed first.
 func (c *Cluster) EtcdToleranceRemain() int {
-	quorum := int(float64((len(c.ControlPlane) / 2) + 1))
-	tolerance := len(c.ControlPlane) - quorum
-
-	return tolerance
-}
-
-// QuorumSatisfied checks is number of healthy nodes satisfying the quorum
-func (c *Cluster) QuorumSatisfied() bool {
-	var healthyNodes int
-	quorum := int(float64(((len(c.ControlPlane) / 2) + 1)))
-	tolerance := len(c.ControlPlane) - quorum
-
+	var healthyEtcd int
 	for i := range c.ControlPlane {
-		if c.ControlPlane[i].ControlPlaneHealthy() {
-			healthyNodes++
+		if c.ControlPlane[i].IsInCluster && c.ControlPlane[i].Etcd.Healthy() {
+			healthyEtcd++
 		}
 	}
 
-	return healthyNodes >= len(c.ControlPlane)-tolerance
+	quorum := int(float64((healthyEtcd / 2) + 1))
+	tolerance := healthyEtcd - quorum
+
+	return tolerance
}
 
 // UpgradeNeeded compares actual and expected Kubernetes versions for control plane and static worker nodes
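To make the removal-order policy above concrete, here is a self-contained sketch under assumed types (host and removalOrder are hypothetical stand-ins, not kubeone's structs): a host whose etcd member is already broken is always safe to delete, since removing it cannot shrink the healthy etcd set, while a host with only a broken API server is listed only while tolerance remains. Unlike the committed code, the sketch also clamps the slice bound before taking deleteCandidate-style prefixes.

package main

import "fmt"

// host is a hypothetical stand-in for kubeone's control-plane host state.
type host struct {
	name        string
	etcdHealthy bool
	apiHealthy  bool
}

// removalOrder mirrors the SafeToDeleteHosts policy from the diff above:
// etcd-broken hosts first, then API-server-broken hosts up to the tolerance.
func removalOrder(hosts []host, tolerance int) []string {
	var safe, candidates []string
	for _, h := range hosts {
		switch {
		case !h.etcdHealthy:
			safe = append(safe, h.name)
		case !h.apiHealthy:
			candidates = append(candidates, h.name)
		}
	}
	tolerance -= len(safe)
	if tolerance > len(candidates) {
		tolerance = len(candidates) // clamp; the committed code relies on caller invariants
	}
	if tolerance > 0 {
		safe = append(safe, candidates[:tolerance]...)
	}
	return safe
}

func main() {
	hosts := []host{
		{name: "cp-1", etcdHealthy: true, apiHealthy: true},
		{name: "cp-2", etcdHealthy: false, apiHealthy: true}, // broken etcd: always listed
		{name: "cp-3", etcdHealthy: true, apiHealthy: false}, // broken API server: listed only within tolerance
	}
	healthyEtcd := 0
	for _, h := range hosts {
		if h.etcdHealthy {
			healthyEtcd++
		}
	}
	// Quorum rule, as in EtcdToleranceRemain: 2 healthy members, quorum 2, tolerance 0,
	// so only cp-2 is printed; cp-3 must wait for the next 'kubeone apply' run.
	tolerance := healthyEtcd - (healthyEtcd/2 + 1)
	fmt.Println(removalOrder(hosts, tolerance))
}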
