Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Stop waiting on CAPI in API #393

Merged
merged 6 commits into from
Jun 14, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 27 additions & 9 deletions magnum_cluster_api/driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -340,11 +340,6 @@ def upgrade_cluster(
This method is called synchronously by the Magnum API, therefore it will be blocking
the Magnum API, so it should be as fast as possible.
"""
need_to_wait = (
cluster.default_ng_master.image_id != cluster_template.image_id
or cluster.labels["kube_tag"] != cluster_template.labels["kube_tag"]
)

# XXX(mnaser): The Magnum API historically only did upgrade one node group at a
# time. This is a limitation of the Magnum API and not the Magnum
# Cluster API since doing multiple rolling upgrades was not very
Expand All @@ -365,12 +360,8 @@ def upgrade_cluster(
# NOTE(mnaser): We run a full apply on the cluster regardless of the changes, since
# the expectation is that running an upgrade operation will change
# the cluster in some way.
cluster_resource = objects.Cluster.for_magnum_cluster(self.k8s_api, cluster)
resources.apply_cluster_from_magnum_cluster(context, self.k8s_api, cluster)

if need_to_wait:
cluster_resource.wait_for_observed_generation_changed()

# NOTE(mnaser): We do not save the cluster object here because the Magnum driver
# will save the object that it passed to us here.

Expand Down Expand Up @@ -431,6 +422,9 @@ def update_nodegroups_status(

node_groups = []
for node_group in cluster.nodegroups:
if node_group.role == "master":
continue

md = objects.MachineDeployment.for_node_group_or_none(
self.k8s_api, cluster, node_group
)
Expand All @@ -448,6 +442,29 @@ def update_nodegroups_status(
node_group.status = fields.ClusterStatus.CREATE_COMPLETE
node_group.save()

# Get list of all of the OpenStackMachine objects for this node group
machines = objects.OpenStackMachine.objects(
self.k8s_api, namespace="magnum-system"
).filter(
selector={
"cluster.x-k8s.io/cluster-name": cluster.stack_id,
"topology.cluster.x-k8s.io/deployment-name": node_group.name,
},
)

# Ensure that the image ID from the spec matches all of the OpenStackMachine objects
# for this node group
md_spec = cluster_resource.get_machine_deployment_spec(node_group.name)
md_variables = {
i["name"]: i["value"] for i in md_spec["variables"]["overrides"]
}
image_id_match = all(
[
machine.obj["spec"]["imageUUID"] == md_variables["imageUUID"]
for machine in machines
]
)

# NOTE(mnaser): If the cluster is in `UPDATE_IN_PROGRESS` state, we need to
# wait for the `MachineDeployment` to match the desired state
# from the `Cluster` resource and that it is in the `Running`
Expand All @@ -458,6 +475,7 @@ def update_nodegroups_status(
and md.equals_spec(
cluster_resource.get_machine_deployment_spec(node_group.name)
)
and image_id_match
):
node_group.status = fields.ClusterStatus.UPDATE_COMPLETE
node_group.save()
Expand Down
22 changes: 18 additions & 4 deletions magnum_cluster_api/objects.py
Original file line number Diff line number Diff line change
Expand Up @@ -134,12 +134,20 @@ def for_node_group_or_none(
return None

def equals_spec(self, spec: dict) -> bool:
    """Check whether this ``MachineDeployment`` matches the desired spec.

    The scraped diff interleaved the removed and added lines here; this is
    the reconstructed post-merge implementation.

    Compares the annotations from the desired ``spec`` against the
    annotations currently set on the live object's template metadata.  If
    the desired spec carries annotations, autoscaling is enabled and the
    replica count is owned by the autoscaler, so only annotations are
    compared.  Otherwise, the replica counts are compared.

    :param spec: Desired machine deployment spec (as generated from the
                 ``Cluster`` resource), containing ``metadata`` and
                 optionally ``replicas``.
    :returns: ``True`` if the live object matches the desired spec.
    """
    expected_annotations = spec["metadata"].get("annotations")
    current_annotations = self.obj["spec"]["template"]["metadata"].get(
        "annotations"
    )

    # NOTE(mnaser): If we have any annotations, that means that autoscaling is
    #               enabled and we should not compare the replicas.
    if expected_annotations:
        return expected_annotations == current_annotations

    expected_replicas = spec.get("replicas")
    current_replicas = self.obj["spec"].get("replicas")

    return expected_replicas == current_replicas


class Machine(NamespacedAPIObject):
Expand Down Expand Up @@ -286,6 +294,12 @@ def openstack_cluster(self):
return list(filtered_clusters)[0]


class OpenStackMachine(NamespacedAPIObject):
    """pykube mapping for the Cluster API ``OpenStackMachine`` custom resource.

    Used to look up the machines backing a node group (e.g. to compare the
    image UUID of each machine against the desired machine deployment spec).
    """

    version = "infrastructure.cluster.x-k8s.io/v1alpha7"
    endpoint = "openstackmachines"
    kind = "OpenStackMachine"


class StorageClass(pykube.objects.APIObject):
version = "storage.k8s.io/v1"
endpoint = "storageclasses"
Expand Down
4 changes: 4 additions & 0 deletions magnum_cluster_api/tests/functional/test_driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,10 @@ def test_upgrade_cluster(self, context, ubuntu_driver, cluster_template):
context, self.cluster, cluster_template, None, None
)

cluster_resource.wait_for_observed_generation_changed(
existing_observed_generation=current_observed_generation,
)

cluster_resource = objects.Cluster.for_magnum_cluster(self.api, self.cluster)
assert cluster_resource.observed_generation != current_observed_generation

Expand Down
Loading