This PR was split off from PR #165 to keep it small. Some edge cases will be implemented in a separate PR.
Showing 4 changed files with 424 additions and 21 deletions.
@@ -0,0 +1,165 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package machineset

import (
	"fmt"
	"time"

	"github.com/golang/glog"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"

	"sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
	machinesetclientset "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1"
)

func (c *MachineSetControllerImpl) calculateStatus(ms *v1alpha1.MachineSet, filteredMachines []*v1alpha1.Machine) v1alpha1.MachineSetStatus {
	newStatus := ms.Status
	// Count the machines whose labels match the labels of the machine set's
	// machine template; a matching machine may carry more labels than the
	// template. Because the template's labels are a superset of the machine
	// set's selector, every possible match is already in filteredMachines.
	fullyLabeledReplicasCount := 0
	readyReplicasCount := 0
	availableReplicasCount := 0
	templateLabel := labels.Set(ms.Spec.Template.Labels).AsSelectorPreValidated()
	for _, machine := range filteredMachines {
		if templateLabel.Matches(labels.Set(machine.Labels)) {
			fullyLabeledReplicasCount++
		}
		node, err := c.getMachineNode(machine)
		if err != nil {
			glog.Warningf("Unable to get node for machine %v, %v", machine.Name, err)
			continue
		}
		if isNodeReady(node) {
			readyReplicasCount++
			if isNodeAvailable(node, ms.Spec.MinReadySeconds, metav1.Now()) {
				availableReplicasCount++
			}
		}
	}

	newStatus.Replicas = int32(len(filteredMachines))
	newStatus.FullyLabeledReplicas = int32(fullyLabeledReplicasCount)
	newStatus.ReadyReplicas = int32(readyReplicasCount)
	newStatus.AvailableReplicas = int32(availableReplicasCount)
	return newStatus
}

// updateMachineSetStatus attempts to update the Status.Replicas of the given
// MachineSet, with a single GET/PUT retry.
func updateMachineSetStatus(c machinesetclientset.MachineSetInterface, ms *v1alpha1.MachineSet, newStatus v1alpha1.MachineSetStatus) (*v1alpha1.MachineSet, error) {
	// This is the steady state. It happens when the MachineSet doesn't have
	// any expectations, since we do a periodic relist every 30s. If the
	// generations differ but the replicas are the same, a caller might have
	// resized to the same replica count.
	if ms.Status.Replicas == newStatus.Replicas &&
		ms.Status.FullyLabeledReplicas == newStatus.FullyLabeledReplicas &&
		ms.Status.ReadyReplicas == newStatus.ReadyReplicas &&
		ms.Status.AvailableReplicas == newStatus.AvailableReplicas &&
		ms.Generation == ms.Status.ObservedGeneration {
		return ms, nil
	}

	// Save the generation number we acted on; otherwise we might wrongfully
	// indicate that we've seen a spec update when we retry.
	// TODO: This can clobber an update if we allow multiple agents to write to
	// the same status.
	newStatus.ObservedGeneration = ms.Generation

	var getErr, updateErr error
	var updatedMS *v1alpha1.MachineSet
	for i := 0; ; i++ {
		// Note: ms.Spec.Replicas is assumed to be non-nil here (defaulted on
		// creation); a nil value would panic on the dereference below.
		glog.V(4).Info(fmt.Sprintf("Updating status for %v: %s/%s, ", ms.Kind, ms.Namespace, ms.Name) +
			fmt.Sprintf("replicas %d->%d (need %d), ", ms.Status.Replicas, newStatus.Replicas, *(ms.Spec.Replicas)) +
			fmt.Sprintf("fullyLabeledReplicas %d->%d, ", ms.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) +
			fmt.Sprintf("readyReplicas %d->%d, ", ms.Status.ReadyReplicas, newStatus.ReadyReplicas) +
			fmt.Sprintf("availableReplicas %d->%d, ", ms.Status.AvailableReplicas, newStatus.AvailableReplicas) +
			fmt.Sprintf("sequence No: %v->%v", ms.Status.ObservedGeneration, newStatus.ObservedGeneration))

		ms.Status = newStatus
		updatedMS, updateErr = c.UpdateStatus(ms)
		if updateErr == nil {
			return updatedMS, nil
		}
		// Stop retrying if we exceed statusUpdateRetries (defined elsewhere in
		// this package) - the MachineSet will be requeued with a rate limit.
		if i >= statusUpdateRetries {
			break
		}
		// Update the MachineSet with the latest resource version for the next poll.
		if ms, getErr = c.Get(ms.Name, metav1.GetOptions{}); getErr != nil {
			// If the GET fails we can't trust status.Replicas anymore. This
			// error is bound to be more interesting than the update failure.
			return nil, getErr
		}
	}

	return nil, updateErr
}

func isNodeAvailable(node *corev1.Node, minReadySeconds int32, now metav1.Time) bool {
	if !isNodeReady(node) {
		return false
	}

	if minReadySeconds == 0 {
		return true
	}

	minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second
	// readyCondition is non-nil here: isNodeReady returning true above implies
	// a NodeReady condition exists in node.Status.Conditions.
	_, readyCondition := getNodeCondition(&node.Status, corev1.NodeReady)

	if !readyCondition.LastTransitionTime.IsZero() &&
		readyCondition.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) {
		return true
	}

	return false
}

// getNodeCondition extracts the provided condition from the given node status.
// It returns the index of the condition and a pointer to it, or -1 and nil if
// the condition is not present.
func getNodeCondition(status *corev1.NodeStatus, conditionType corev1.NodeConditionType) (int, *corev1.NodeCondition) {
	if status == nil {
		return -1, nil
	}
	for i := range status.Conditions {
		if status.Conditions[i].Type == conditionType {
			return i, &status.Conditions[i]
		}
	}
	return -1, nil
}

// isNodeReady returns true if a node is ready; false otherwise.
func isNodeReady(node *corev1.Node) bool {
	for _, c := range node.Status.Conditions {
		if c.Type == corev1.NodeReady {
			return c.Status == corev1.ConditionTrue
		}
	}
	return false
}

func getMachinesToDelete(filteredMachines []*v1alpha1.Machine, diff int) []*v1alpha1.Machine {
	// TODO: Define machine deletion policies.
	// see: https://github.com/kubernetes/kube-deploy/issues/625
	return filteredMachines[:diff]
}
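
For a concrete view of the counting in calculateStatus, here is a minimal test-file sketch of the superset matching it relies on. The label keys and values are invented for illustration, and the example assumes it sits in a _test.go file of the same machineset package:

package machineset

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

// Example_templateLabelMatch shows why a machine carrying extra labels still
// counts as fully labeled: Selector.Matches only requires every key/value
// pair of the template's labels to be present on the machine.
func Example_templateLabelMatch() {
	// Hypothetical template labels and machine labels.
	template := labels.Set{"set": "node"}.AsSelectorPreValidated()
	machineLabels := labels.Set{"set": "node", "zone": "us-central1-a"}
	fmt.Println(template.Matches(machineLabels))
	// Output: true
}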
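
The availability check can likewise be sketched as a small test. The node fixture and thresholds below are invented, and the test assumes it lives in the same machineset package so it can reach the unexported isNodeAvailable:

package machineset

import (
	"testing"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// TestIsNodeAvailable sketches the minReadySeconds behavior: a node that has
// been Ready for 30 seconds is available for any minReadySeconds up to 30,
// and not yet available beyond that.
func TestIsNodeAvailable(t *testing.T) {
	node := &corev1.Node{
		Status: corev1.NodeStatus{
			Conditions: []corev1.NodeCondition{{
				Type:               corev1.NodeReady,
				Status:             corev1.ConditionTrue,
				LastTransitionTime: metav1.NewTime(time.Now().Add(-30 * time.Second)),
			}},
		},
	}
	if !isNodeAvailable(node, 10, metav1.Now()) {
		t.Error("expected node available once minReadySeconds (10s) has elapsed")
	}
	if isNodeAvailable(node, 60, metav1.Now()) {
		t.Error("expected node not yet available before minReadySeconds (60s) has elapsed")
	}
}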
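
Finally, a hypothetical helper sketch (syncStatus is not part of this commit; the name and wiring are assumed) showing how the two halves would compose in a reconcile pass: compute the observed status from the filtered machines, then push it with the retrying writer above.

package machineset

import (
	"sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
	machinesetclientset "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1"
)

// syncStatus is a hypothetical composition of calculateStatus and
// updateMachineSetStatus, not code from this commit.
func (c *MachineSetControllerImpl) syncStatus(client machinesetclientset.MachineSetInterface, ms *v1alpha1.MachineSet, filteredMachines []*v1alpha1.Machine) error {
	newStatus := c.calculateStatus(ms, filteredMachines)
	_, err := updateMachineSetStatus(client, ms, newStatus)
	return err
}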