forked from vmware/go-vcloud-director
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathcse_yaml.go
411 lines (375 loc) · 17.1 KB
/
cse_yaml.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
package govcd
import (
"fmt"
semver "github.com/hashicorp/go-version"
"github.com/vmware/go-vcloud-director/v2/types/v56"
"sigs.k8s.io/yaml"
"strings"
)
// updateCapiYaml takes a YAML and modifies its Kubernetes Template OVA, its Control plane, its Worker pools
// and its Node Health Check capabilities, by using the new values provided as input.
// If some of the values of the input is not provided, it doesn't change them.
// If none of the values is provided, it just returns the same untouched YAML.
// On error, it returns the original (unmodified) YAML so callers always receive a coherent value.
func (cluster *CseKubernetesCluster) updateCapiYaml(input CseClusterUpdateInput) (string, error) {
	if cluster == nil || cluster.capvcdType == nil {
		return "", fmt.Errorf("receiver cluster is nil")
	}

	// Nothing to change: short-circuit and return the stored YAML untouched
	if input.ControlPlane == nil && input.WorkerPools == nil && input.NodeHealthCheck == nil && input.KubernetesTemplateOvaId == nil && input.NewWorkerPools == nil {
		return cluster.capvcdType.Spec.CapiYaml, nil
	}

	// The YAML contains multiple documents, so we cannot use a simple yaml.Unmarshal() as this one just gets the first
	// document it finds.
	yamlDocs, err := unmarshalMultipleYamlDocuments(cluster.capvcdType.Spec.CapiYaml)
	if err != nil {
		return cluster.capvcdType.Spec.CapiYaml, fmt.Errorf("error unmarshalling YAML: %s", err)
	}

	if input.ControlPlane != nil {
		err := cseUpdateControlPlaneInYaml(yamlDocs, *input.ControlPlane)
		if err != nil {
			return cluster.capvcdType.Spec.CapiYaml, err
		}
	}

	if input.WorkerPools != nil {
		err := cseUpdateWorkerPoolsInYaml(yamlDocs, *input.WorkerPools)
		if err != nil {
			return cluster.capvcdType.Spec.CapiYaml, err
		}
	}

	// Order matters. We need to add the new pools before updating the Kubernetes template.
	if input.NewWorkerPools != nil {
		// Worker pool names must be unique
		for _, existingPool := range cluster.WorkerPools {
			for _, newPool := range *input.NewWorkerPools {
				if newPool.Name == existingPool.Name {
					return cluster.capvcdType.Spec.CapiYaml, fmt.Errorf("there is an existing Worker Pool with name '%s'", existingPool.Name)
				}
			}
		}

		yamlDocs, err = cseAddWorkerPoolsInYaml(yamlDocs, *cluster, *input.NewWorkerPools)
		if err != nil {
			return cluster.capvcdType.Spec.CapiYaml, err
		}
	}

	// As a side note, we can't optimize this one with "if <current value> equals <new value> do nothing" because
	// in order to retrieve the current value we would need to explore the YAML anyway, which is what we also need to do to update it.
	// Also, even if we did it, the current value obtained from YAML would be a Name, but the new value is an ID, so we would need to query VCD anyway
	// as well.
	// So in this special case this "optimization" would optimize nothing. The same happens with other YAML values.
	if input.KubernetesTemplateOvaId != nil {
		vAppTemplate, err := getVAppTemplateById(cluster.client, *input.KubernetesTemplateOvaId)
		if err != nil {
			return cluster.capvcdType.Spec.CapiYaml, fmt.Errorf("could not retrieve the Kubernetes Template OVA with ID '%s': %s", *input.KubernetesTemplateOvaId, err)
		}
		// Check the versions of the selected OVA before upgrading
		versions, err := getTkgVersionBundleFromVAppTemplate(vAppTemplate.VAppTemplate)
		if err != nil {
			return cluster.capvcdType.Spec.CapiYaml, fmt.Errorf("could not retrieve the TKG versions of OVA '%s': %s", *input.KubernetesTemplateOvaId, err)
		}
		// Only allow same-or-newer TKG versions and upgradeable Kubernetes versions
		if versions.compareTkgVersion(cluster.capvcdType.Status.Capvcd.Upgrade.Current.TkgVersion) < 0 || !versions.kubernetesVersionIsUpgradeableFrom(cluster.capvcdType.Status.Capvcd.Upgrade.Current.KubernetesVersion) {
			return cluster.capvcdType.Spec.CapiYaml, fmt.Errorf("cannot perform an OVA change as the new one '%s' has an older TKG/Kubernetes version (%s/%s)", vAppTemplate.VAppTemplate.Name, versions.TkgVersion, versions.KubernetesVersion)
		}
		err = cseUpdateKubernetesTemplateInYaml(yamlDocs, vAppTemplate.VAppTemplate)
		if err != nil {
			return cluster.capvcdType.Spec.CapiYaml, err
		}
	}

	if input.NodeHealthCheck != nil {
		cseComponentsVersions, err := getCseComponentsVersions(cluster.CseVersion)
		if err != nil {
			// Return the original YAML for consistency with every other error path of this method
			return cluster.capvcdType.Spec.CapiYaml, err
		}
		vcdKeConfig, err := getVcdKeConfig(cluster.client, cseComponentsVersions.VcdKeConfigRdeTypeVersion, *input.NodeHealthCheck)
		if err != nil {
			return cluster.capvcdType.Spec.CapiYaml, err
		}

		yamlDocs, err = cseUpdateNodeHealthCheckInYaml(yamlDocs, cluster.Name, cluster.CseVersion, vcdKeConfig)
		if err != nil {
			return cluster.capvcdType.Spec.CapiYaml, err
		}
	}

	return marshalMultipleYamlDocuments(yamlDocs)
}
// cseUpdateKubernetesTemplateInYaml modifies the given Kubernetes cluster YAML by modifying the Kubernetes Template OVA
// used by all the cluster elements.
// The caveat here is that not only VCDMachineTemplate needs to be changed with the new OVA name, but also
// other fields that reference the related Kubernetes version, TKG version and other derived information.
// Returns an error if any of the expected fields is missing from the YAML documents.
func cseUpdateKubernetesTemplateInYaml(yamlDocuments []map[string]interface{}, kubernetesTemplateOva *types.VAppTemplate) error {
	// Derive the version bundle (Kubernetes, TKG, TKr, CoreDNS, etcd) from the new OVA
	tkgBundle, err := getTkgVersionBundleFromVAppTemplate(kubernetesTemplateOva)
	if err != nil {
		return err
	}
	for _, d := range yamlDocuments {
		switch d["kind"] {
		case "VCDMachineTemplate":
			// Each traverseMapAndGet check below proves that the intermediate maps exist,
			// so the subsequent chained type assertions cannot panic.
			ok := traverseMapAndGet[string](d, "spec.template.spec.template") != ""
			if !ok {
				return fmt.Errorf("the VCDMachineTemplate 'spec.template.spec.template' field is missing")
			}
			// Point the machine template to the new OVA name
			d["spec"].(map[string]interface{})["template"].(map[string]interface{})["spec"].(map[string]interface{})["template"] = kubernetesTemplateOva.Name
		case "MachineDeployment":
			ok := traverseMapAndGet[string](d, "spec.template.spec.version") != ""
			if !ok {
				return fmt.Errorf("the MachineDeployment 'spec.template.spec.version' field is missing")
			}
			// Worker machines must run the Kubernetes version shipped in the new OVA
			d["spec"].(map[string]interface{})["template"].(map[string]interface{})["spec"].(map[string]interface{})["version"] = tkgBundle.KubernetesVersion
		case "Cluster":
			ok := traverseMapAndGet[string](d, "metadata.annotations.TKGVERSION") != ""
			if !ok {
				return fmt.Errorf("the Cluster 'metadata.annotations.TKGVERSION' field is missing")
			}
			// The Cluster object carries the TKG version as an annotation
			d["metadata"].(map[string]interface{})["annotations"].(map[string]interface{})["TKGVERSION"] = tkgBundle.TkgVersion
			ok = traverseMapAndGet[string](d, "metadata.labels.tanzuKubernetesRelease") != ""
			if !ok {
				return fmt.Errorf("the Cluster 'metadata.labels.tanzuKubernetesRelease' field is missing")
			}
			// ...and the Tanzu Kubernetes Release (TKr) as a label
			d["metadata"].(map[string]interface{})["labels"].(map[string]interface{})["tanzuKubernetesRelease"] = tkgBundle.TkrVersion
		case "KubeadmControlPlane":
			ok := traverseMapAndGet[string](d, "spec.version") != ""
			if !ok {
				return fmt.Errorf("the KubeadmControlPlane 'spec.version' field is missing")
			}
			// Control plane Kubernetes version
			d["spec"].(map[string]interface{})["version"] = tkgBundle.KubernetesVersion
			ok = traverseMapAndGet[string](d, "spec.kubeadmConfigSpec.clusterConfiguration.dns.imageTag") != ""
			if !ok {
				return fmt.Errorf("the KubeadmControlPlane 'spec.kubeadmConfigSpec.clusterConfiguration.dns.imageTag' field is missing")
			}
			// CoreDNS image tag bundled with the OVA
			d["spec"].(map[string]interface{})["kubeadmConfigSpec"].(map[string]interface{})["clusterConfiguration"].(map[string]interface{})["dns"].(map[string]interface{})["imageTag"] = tkgBundle.CoreDnsVersion
			ok = traverseMapAndGet[string](d, "spec.kubeadmConfigSpec.clusterConfiguration.etcd.local.imageTag") != ""
			if !ok {
				return fmt.Errorf("the KubeadmControlPlane 'spec.kubeadmConfigSpec.clusterConfiguration.etcd.local.imageTag' field is missing")
			}
			// etcd image tag bundled with the OVA
			d["spec"].(map[string]interface{})["kubeadmConfigSpec"].(map[string]interface{})["clusterConfiguration"].(map[string]interface{})["etcd"].(map[string]interface{})["local"].(map[string]interface{})["imageTag"] = tkgBundle.EtcdVersion
		}
	}
	return nil
}
// cseUpdateControlPlaneInYaml modifies the given Kubernetes cluster YAML contents by changing the Control Plane with the input parameters.
func cseUpdateControlPlaneInYaml(yamlDocuments []map[string]interface{}, input CseControlPlaneUpdateInput) error {
	// Control Plane machine count must be a positive odd number
	if input.MachineCount < 1 || input.MachineCount%2 == 0 {
		return fmt.Errorf("incorrect machine count for Control Plane: %d. Should be at least 1 and an odd number", input.MachineCount)
	}

	found := false
	for _, document := range yamlDocuments {
		if document["kind"] != "KubeadmControlPlane" {
			continue
		}
		// Stored as float64 because that is how the generic YAML unmarshalling decodes numbers
		document["spec"].(map[string]interface{})["replicas"] = float64(input.MachineCount)
		found = true
	}

	if !found {
		return fmt.Errorf("could not find the KubeadmControlPlane object in the YAML")
	}
	return nil
}
// cseUpdateWorkerPoolsInYaml modifies the given Kubernetes cluster YAML contents by changing
// the existing Worker Pools with the input parameters.
// Returns an error if a MachineDeployment has no name, if a requested machine count is negative,
// or if not every entry of the input map matched an existing Worker Pool in the YAML.
func cseUpdateWorkerPoolsInYaml(yamlDocuments []map[string]interface{}, workerPools map[string]CseWorkerPoolUpdateInput) error {
	updated := 0
	for _, d := range yamlDocuments {
		if d["kind"] != "MachineDeployment" {
			continue
		}

		workerPoolName := traverseMapAndGet[string](d, "metadata.name")
		if workerPoolName == "" {
			return fmt.Errorf("the MachineDeployment 'metadata.name' field is empty")
		}

		// Direct map lookup instead of a linear scan over the keys
		workerPool, ok := workerPools[workerPoolName]
		if !ok {
			// This worker pool must not be updated as it is not present in the input, continue searching for the ones we want
			continue
		}

		if workerPool.MachineCount < 0 {
			return fmt.Errorf("incorrect machine count for worker pool %s: %d. Should be at least 0", workerPoolName, workerPool.MachineCount)
		}

		d["spec"].(map[string]interface{})["replicas"] = float64(workerPool.MachineCount) // As it was originally unmarshalled as a float64
		updated++
	}
	if updated != len(workerPools) {
		return fmt.Errorf("could not update all the Node pools. Updated %d, expected %d", updated, len(workerPools))
	}
	return nil
}
// cseAddWorkerPoolsInYaml modifies the given Kubernetes cluster YAML contents by adding new Worker Pools
// described by the input parameters.
// NOTE: This function doesn't modify the input, but returns a copy of the YAML with the added unmarshalled documents.
func cseAddWorkerPoolsInYaml(docs []map[string]interface{}, cluster CseKubernetesCluster, newWorkerPools []CseWorkerPoolSettings) ([]map[string]interface{}, error) {
	if len(newWorkerPools) == 0 {
		return docs, nil
	}

	// Collect every referenced compute policy / storage profile ID so all can be resolved
	// to their VCD names in a single idToNames call.
	// NOTE(review): unset IDs get appended as empty strings; presumably idToNames tolerates them — confirm.
	var computePolicyIds []string
	var storageProfileIds []string
	for _, w := range newWorkerPools {
		computePolicyIds = append(computePolicyIds, w.SizingPolicyId, w.PlacementPolicyId, w.VGpuPolicyId)
		storageProfileIds = append(storageProfileIds, w.StorageProfileId)
	}

	idToNameCache, err := idToNames(cluster.client, computePolicyIds, storageProfileIds)
	if err != nil {
		return nil, err
	}

	// Build the internal settings structure used to render the worker pool YAML,
	// translating every ID to its name through the cache built above.
	internalSettings := cseClusterSettingsInternal{WorkerPools: make([]cseWorkerPoolSettingsInternal, len(newWorkerPools))}
	for i, workerPool := range newWorkerPools {
		internalSettings.WorkerPools[i] = cseWorkerPoolSettingsInternal{
			Name:                workerPool.Name,
			MachineCount:        workerPool.MachineCount,
			DiskSizeGi:          workerPool.DiskSizeGi,
			StorageProfileName:  idToNameCache[workerPool.StorageProfileId],
			SizingPolicyName:    idToNameCache[workerPool.SizingPolicyId],
			VGpuPolicyName:      idToNameCache[workerPool.VGpuPolicyId],
			PlacementPolicyName: idToNameCache[workerPool.PlacementPolicyId],
		}
	}

	// Extra information needed to render the YAML. As all the worker pools share the same
	// Kubernetes OVA name, version and Catalog, we pick this info from any of the available ones.
	for _, doc := range docs {
		if internalSettings.CatalogName == "" && doc["kind"] == "VCDMachineTemplate" {
			internalSettings.CatalogName = traverseMapAndGet[string](doc, "spec.template.spec.catalog")
		}
		if internalSettings.KubernetesTemplateOvaName == "" && doc["kind"] == "VCDMachineTemplate" {
			internalSettings.KubernetesTemplateOvaName = traverseMapAndGet[string](doc, "spec.template.spec.template")
		}
		if internalSettings.TkgVersionBundle.KubernetesVersion == "" && doc["kind"] == "MachineDeployment" {
			internalSettings.TkgVersionBundle.KubernetesVersion = traverseMapAndGet[string](doc, "spec.template.spec.version")
		}
		// Stop scanning once all three pieces of information were found
		if internalSettings.CatalogName != "" && internalSettings.KubernetesTemplateOvaName != "" && internalSettings.TkgVersionBundle.KubernetesVersion != "" {
			break
		}
	}
	internalSettings.Name = cluster.Name
	internalSettings.CseVersion = cluster.CseVersion

	// Render the new worker pools as a multi-document YAML string
	nodePoolsYaml, err := internalSettings.generateWorkerPoolsYaml()
	if err != nil {
		return nil, err
	}

	newWorkerPoolsYamlDocs, err := unmarshalMultipleYamlDocuments(nodePoolsYaml)
	if err != nil {
		return nil, err
	}

	// Copy the input slice before appending so the caller's slice is left untouched
	result := make([]map[string]interface{}, len(docs))
	copy(result, docs)
	return append(result, newWorkerPoolsYamlDocs...), nil
}
// cseUpdateNodeHealthCheckInYaml updates the Kubernetes cluster described in the given YAML documents by adding or removing
// the MachineHealthCheck object.
// NOTE: This function doesn't modify the input, but returns a copy of the YAML with the modifications.
func cseUpdateNodeHealthCheckInYaml(yamlDocuments []map[string]interface{}, clusterName string, cseVersion semver.Version, vcdKeConfig vcdKeConfig) ([]map[string]interface{}, error) {
	// Copy the input documents and remember where the MachineHealthCheck block is, if present.
	// NOTE(review): only the last MachineHealthCheck is tracked if several exist — presumably there is at most one.
	mhcPosition := -1
	result := make([]map[string]interface{}, len(yamlDocuments))
	for i, d := range yamlDocuments {
		if d["kind"] == "MachineHealthCheck" {
			mhcPosition = i
		}
		result[i] = d
	}

	// Health checks are considered enabled only when every timeout and the unhealthy percentage are set
	machineHealthCheckEnabled := vcdKeConfig.NodeUnknownTimeout != "" && vcdKeConfig.NodeStartupTimeout != "" && vcdKeConfig.NodeNotReadyTimeout != "" &&
		vcdKeConfig.MaxUnhealthyNodesPercentage != 0

	if mhcPosition < 0 {
		// There is no MachineHealthCheck block
		if !machineHealthCheckEnabled {
			// We don't want it neither, so nothing to do
			return result, nil
		}

		// We need to add the block to the slice of YAML documents
		settings := &cseClusterSettingsInternal{CseVersion: cseVersion, Name: clusterName, VcdKeConfig: vcdKeConfig}
		mhcYaml, err := settings.generateMachineHealthCheckYaml()
		if err != nil {
			return nil, err
		}
		var mhc map[string]interface{}
		err = yaml.Unmarshal([]byte(mhcYaml), &mhc)
		if err != nil {
			return nil, err
		}
		result = append(result, mhc)
	} else {
		// There is a MachineHealthCheck block
		if machineHealthCheckEnabled {
			// We want it, but it is already there, so nothing to do
			return result, nil
		}

		// We don't want Machine Health Checks, we delete the YAML document.
		// This is a swap-remove: document order after the removed position is not preserved.
		result[mhcPosition] = result[len(result)-1] // We override the MachineHealthCheck block with the last document
		result = result[:len(result)-1]             // We remove the last document (now duplicated)
	}

	return result, nil
}
// marshalMultipleYamlDocuments takes a slice of maps representing multiple YAML documents (one per item in the slice) and
// marshals all of them into a single string with the corresponding separators "---".
// Returns an error if any single document cannot be marshaled.
func marshalMultipleYamlDocuments(yamlDocuments []map[string]interface{}) (string, error) {
	// Use a strings.Builder instead of string concatenation in a loop to avoid quadratic allocations
	var sb strings.Builder
	for i, yamlDoc := range yamlDocuments {
		updatedSingleDoc, err := yaml.Marshal(yamlDoc)
		if err != nil {
			return "", fmt.Errorf("error marshaling the updated CAPVCD YAML '%v': %s", yamlDoc, err)
		}
		sb.Write(updatedSingleDoc)
		sb.WriteString("\n")
		if i < len(yamlDocuments)-1 { // The last document doesn't need the YAML separator
			sb.WriteString("---\n")
		}
	}
	return sb.String(), nil
}
// unmarshalMultipleYamlDocuments takes a multi-document YAML (multiple YAML documents are separated by "---") and
// unmarshalls all of them into a slice of generic maps with the corresponding content.
func unmarshalMultipleYamlDocuments(yamlDocuments string) ([]map[string]interface{}, error) {
	// A blank input produces an empty (non-nil) slice
	if strings.TrimSpace(yamlDocuments) == "" {
		return []map[string]interface{}{}, nil
	}

	documents := strings.Split(yamlDocuments, "---\n")
	unmarshaled := make([]map[string]interface{}, len(documents))
	for i, document := range documents {
		if err := yaml.Unmarshal([]byte(document), &unmarshaled[i]); err != nil {
			return nil, fmt.Errorf("could not unmarshal document %s: %s", document, err)
		}
	}

	return unmarshaled, nil
}
// traverseMapAndGet traverses the input interface{}, which should be a map of maps, by following the path specified as
// "keyA.keyB.keyC.keyD", doing something similar to, visually speaking, map["keyA"]["keyB"]["keyC"]["keyD"], or in other words,
// it goes inside every inner map iteratively, until the given path is finished.
// If the path doesn't lead to any value, or if the value is nil, or there is any other issue, returns the "zero" value of T.
func traverseMapAndGet[T any](input interface{}, path string) T {
	var zero T

	// A nil input, a non-map input or an empty map cannot be traversed
	current, isMap := input.(map[string]interface{})
	if !isMap || len(current) == 0 {
		return zero
	}

	keys := strings.Split(path, ".")
	var value interface{}
	for idx, key := range keys {
		next, present := current[key]
		if !present {
			return zero
		}
		if idx == len(keys)-1 {
			// Last path element: this is the value to return
			value = next
			break
		}
		// Intermediate path element: it must be another map to keep descending
		nested, isNested := next.(map[string]interface{})
		if !isNested {
			return zero
		}
		current = nested
	}

	typed, matches := value.(T)
	if !matches {
		return zero
	}
	return typed
}