require "kubectl_client"
require "./src/utils/utils.cr"
require "./src/utils/binary_reference.cr"
module Helm
BinarySingleton = BinaryReference.new
#TODO move to kubectlclient
DEPLOYMENT="Deployment"
SERVICE="Service"
POD="Pod"
CHART_YAML = "Chart.yaml"

  module ShellCmd
    def self.run(cmd, log_prefix, force_output = false)
      Log.info { "#{log_prefix} command: #{cmd}" }
      status = Process.run(
        cmd,
        shell: true,
        output: output = IO::Memory.new,
        error: stderr = IO::Memory.new
      )
      if force_output == false
        Log.debug { "#{log_prefix} output: #{output.to_s}" }
      else
        Log.info { "#{log_prefix} output: #{output.to_s}" }
      end
      # Only log stderr when it is not empty
      if stderr.to_s.size > 1
        Log.info { "#{log_prefix} stderr: #{stderr.to_s}" }
      end
      {status: status, output: output.to_s, error: stderr.to_s}
    end
  end
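
  # Example usage of ShellCmd.run (hypothetical, for illustration only; the
  # command string and log prefix below are assumptions, not values used
  # elsewhere in this file):
  #
  #   result = Helm::ShellCmd.run("ls -al", "example")
  #   result[:status].success? # => true if the command exited with 0
  #   result[:output]          # => captured stdout as a String
  #   result[:error]           # => captured stderr as a String
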
  # Use helm to apply the helm values file to the helm chart templates to create a complete manifest.
  # Helm chart manifests are Go templates that are rendered with the supplied values.
  def self.generate_manifest_from_templates(release_name, helm_chart, output_file = "cnfs/temp_template.yml", namespace : String | Nil = nil, helm_values = nil)
    # namespace can be an empty string, so verify and set it to nil.
    if !namespace.nil? && namespace.empty?
      namespace = nil
    end
    Log.debug { "generate_manifest_from_templates" }
    helm = BinarySingleton.helm
    Log.info { "Helm::generate_manifest_from_templates command: #{helm} template #{release_name} #{helm_chart} > #{output_file}" }
    Helm::ShellCmd.run("ls -alR #{helm_chart}", "before generate")
    Helm::ShellCmd.run("ls -alR cnfs", "before generate")
    resp = Helm.template(release_name, helm_chart, output_file, namespace, helm_values)
    Helm::ShellCmd.run("ls -alR #{helm_chart}", "after generate")
    Helm::ShellCmd.run("ls -alR cnfs", "after generate")
    Log.debug { "generate_manifest_from_templates output_file: #{output_file}" }
    [resp[:status].success?, output_file]
  end
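
  # Example usage of generate_manifest_from_templates (hypothetical; the
  # release name, chart path and output file below are illustrative
  # assumptions):
  #
  #   success, manifest_path = Helm.generate_manifest_from_templates(
  #     "coredns",
  #     "stable/coredns",
  #     "cnfs/temp_template.yml",
  #     "cnf-namespace"
  #   )
  #   # success       => true when "helm template" exited successfully
  #   # manifest_path => path of the rendered manifest file
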
  def self.generate_manifest(release_name : String, namespace : String)
    Log.info { "Generating manifest from installed CNF: #{release_name}" }
    helm = BinarySingleton.helm
    cmd = "#{helm} get manifest #{release_name} --namespace #{namespace}"
    Log.info { "helm command: #{cmd}" }
    status = Process.run(cmd, shell: true, output: output = IO::Memory.new, error: stderr = IO::Memory.new)
    if status.success? && !output.empty?
      Log.debug { "Helm.manifest output:\n #{output.to_s}" }
      Log.info { "Manifest was generated successfully" }
      if !stderr.empty?
        Log.info { "Helm.manifest stderr: #{stderr.to_s}" }
      end
    else
      raise ManifestGenerationError.new(stderr.to_s)
    end
    output.to_s
  end
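
  # Example usage of generate_manifest (hypothetical; assumes a release named
  # "coredns" is already installed in the "cnf-namespace" namespace):
  #
  #   manifest_yaml = Helm.generate_manifest("coredns", "cnf-namespace")
  #   # => the full manifest of the installed release as a YAML String;
  #   #    raises ManifestGenerationError if "helm get manifest" fails
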
  def self.workload_resource_by_kind(ymls : Array(YAML::Any), kind : String)
    Log.info { "workload_resource_by_kind kind: #{kind}" }
    Log.debug { "workload_resource_by_kind ymls: #{ymls}" }
    resources = ymls.select { |x| x["kind"]? == kind }.reject! { |x|
      # reject resources that carry a 'helm.sh/hook' annotation (e.g. test hooks)
      Log.debug { "x[metadata]?: #{x["metadata"]?}" }
      Log.debug { "x[metadata][annotations]?: #{x["metadata"]? && x["metadata"]["annotations"]?}" }
      x.dig?("metadata", "annotations", "helm.sh/hook")
    }
    Log.debug { "resources: #{resources}" }
    resources
  end

  def self.all_workload_resources(yml : Array(YAML::Any), default_namespace : String = "default") : Array(YAML::Any)
    resources = KubectlClient::WORKLOAD_RESOURCES.map { |k, v|
      Helm.workload_resource_by_kind(yml, v)
    }.flatten

    # This patch works around a Helm behaviour: https://github.com/helm/helm/issues/10737
    #
    # The map block below inserts a "metadata.namespace" key into resources that do not specify a namespace.
    # The parsed resource YAML comes from the "helm template" command.
    #
    # The "helm template" command ONLY renders the helm chart with variables substituted.
    #
    # The YAML output by "helm template" only contains a namespace for a resource if:
    #   1. The helm chart hardcodes the namespace.
    #   2. OR the helm chart contains a Go template expression like:
    #        namespace: {{ .Release.Namespace }}
    #
    # If neither of the above is present, the "-n <namespace>" argument passed
    # to "helm template" is not reflected anywhere in the output.
    # Below is a scenario where this causes an issue for cnf_setup:
    #
    #   0. The CNF's helm chart does not specify a namespace for the resources in the chart YAML.
    #   1. The user sets "helm_install_namespace: hello-world" in the CNF config.
    #   2. cnf_setup installs the helm chart with the "-n hello-world" namespace flag.
    #   3. cnf_setup calls the CNFManager.workload_resource_test or cnf_workload_resources helper to fetch YAMLs.
    #      The YAMLs come from the "helm template" output,
    #      so they do not contain a namespace for any resource, due to [0] above.
    #   4. cnf_setup calls KubectlClient::Get.resource_wait_for_install assuming the resource is in the default namespace.
    #      Since the resource does not exist there, cnf_setup loops until timeout waiting for the install.
    #
    # Similarly, any test that uses the CNFManager helpers to look for resources in the CNF
    # would also assume the default namespace.
    #
    # To resolve the issue, we insert the namespace into the resource YAMLs being returned.
    resources_with_namespace = resources.map do |resource|
      ensure_resource_with_namespace(resource, default_namespace)
    end
    Log.debug { "all resources: #{resources_with_namespace}" }
    resources_with_namespace
  end
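
  # Illustration of the namespace workaround above (hypothetical resource;
  # the Deployment and namespace names are assumptions for this example):
  #
  #   rendered = YAML.parse(<<-YAML
  #     kind: Deployment
  #     metadata:
  #       name: hello-world
  #     YAML
  #   )
  #   resources = Helm.all_workload_resources([rendered], default_namespace: "hello-world")
  #   resources.first.dig("metadata", "namespace") # => "hello-world"
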
  def self.ensure_resource_with_namespace(resource, default_namespace : String)
    if resource.dig?("metadata", "namespace") != nil
      resource
    else
      # Required workaround because we cannot assign a key or mutate YAML::Any
      # Step-1: Convert the resource to a Hash(YAML::Any, YAML::Any)
      resource = resource.as_h
      # Step-2: Convert metadata from YAML::Any to a Hash(YAML::Any, YAML::Any)
      metadata = resource["metadata"].as_h
      # Step-3: The keys in the hash are of type YAML::Any,
      #         so convert the string "namespace" to YAML.
      namespace_yaml_key = YAML.parse("namespace".to_yaml)
      # Step-4: Convert the default namespace to YAML and assign it to the namespace key
      metadata[namespace_yaml_key] = YAML.parse(default_namespace.to_yaml)
      # Step-5: Convert the "metadata" key name to YAML::Any
      metadata_yaml_key = YAML.parse("metadata".to_yaml)
      # Step-6: Set the metadata on the resource
      resource[metadata_yaml_key] = YAML.parse(metadata.to_yaml)
      resource = YAML.parse(resource.to_yaml)
      resource
    end
  end
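
  # Example of ensure_resource_with_namespace (hypothetical input; the Pod
  # name and namespace below are illustrative assumptions):
  #
  #   pod = YAML.parse("kind: Pod\nmetadata:\n  name: busybox\n")
  #   with_ns = Helm.ensure_resource_with_namespace(pod, "cnf-namespace")
  #   with_ns.dig("metadata", "namespace").as_s # => "cnf-namespace"
  #
  #   # A resource that already has metadata.namespace is returned unchanged.
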
  def self.workload_resource_names(resources : Array(YAML::Any))
    resource_names = resources.map do |x|
      x["metadata"]["name"]
    end
    Log.debug { "resource names: #{resource_names}" }
    resource_names
  end

  def self.workload_resource_kind_names(resources : Array(YAML::Any), default_namespace : String = "default") : Array(NamedTuple(kind: String, name: String, namespace: String))
    resource_names = resources.map do |x|
      namespace = (x.dig?("metadata", "namespace") || default_namespace).to_s
      {
        kind: x["kind"].as_s,
        name: x["metadata"]["name"].as_s,
        namespace: namespace
      }
    end
    Log.debug { "resource names: #{resource_names}" }
    resource_names
  end
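
  # Example of workload_resource_kind_names (hypothetical; assumes "resources"
  # holds the parsed output of "helm template" for a chart with one Deployment):
  #
  #   Helm.workload_resource_kind_names(resources, default_namespace: "hello-world")
  #   # => [{kind: "Deployment", name: "hello-world", namespace: "hello-world"}]
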
  def self.kind_exists?(args, config, kind, default_namespace : String = "default")
    Log.info { "kind_exists?: #{kind}" }
    resource_ymls = CNFManager.cnf_workload_resources(args, config) do |resource|
      resource
    end
    if !config.cnf_config[:helm_install_namespace].empty?
      default_namespace = config.cnf_config[:helm_install_namespace]
    end
    resource_names = Helm.workload_resource_kind_names(resource_ymls, default_namespace: default_namespace)
    found = false
    resource_names.each do |resource|
      if resource[:kind].downcase == kind.downcase
        found = true
      end
    end
    Log.info { "kind_exists? found: #{found}" }
    found
  end

  def self.helm_repo_add(helm_repo_name, helm_repo_url)
    helm = BinarySingleton.helm
    Log.info { "helm_repo_add: helm repo add command: #{helm} repo add #{helm_repo_name} #{helm_repo_url}" }
    stdout = IO::Memory.new
    stderror = IO::Memory.new
    begin
      process = Process.new("#{helm}", ["repo", "add", "#{helm_repo_name}", "#{helm_repo_url}"], output: stdout, error: stderror)
      status = process.wait
      helm_resp = stdout.to_s
      error = stderror.to_s
      Log.info { "error: #{error}" }
      Log.info { "helm_resp (add): #{helm_resp}" }
    rescue
      Log.info { "helm repo add command critically failed: #{helm} repo add #{helm_repo_name} #{helm_repo_url}" }
    end
    # Helm version v3.3.3 gave us a surprise
    if helm_resp =~ /has been added|already exists/ || error =~ /has been added|already exists/
      ret = true
    else
      ret = false
    end
    ret
  end
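
  # Example usage of helm_repo_add (hypothetical repository; the name and URL
  # below are illustrative assumptions):
  #
  #   Helm.helm_repo_add("bitnami", "https://charts.bitnami.com/bitnami")
  #   # => true when the repo is added or already exists, false otherwise
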
  def self.helm_gives_k8s_warning?(verbose = false)
    helm = BinarySingleton.helm
    stdout = IO::Memory.new
    stderror = IO::Memory.new
    begin
      process = Process.new("#{helm}", ["list"], output: stdout, error: stderror)
      status = process.wait
      helm_resp = stdout.to_s
      error = stderror.to_s
      Log.info { "error: #{error}" }
      Log.info { "helm_resp (list): #{helm_resp}" }
      # Helm version v3.3.3 gave us a surprise
      if (helm_resp + error) =~ /WARNING: Kubernetes configuration file is/
        stdout_failure("For this version of helm you must set your K8s config file permissions to chmod 700") if verbose
        true
      else
        false
      end
    rescue ex
      stdout_failure("Please use a newer version of helm")
      true
    end
  end

  def self.chart_name(helm_chart_repo)
    helm_chart_repo.split("/").last
  end

  def self.template(release_name, helm_chart_or_directory, output_file : String = "cnfs/temp_template.yml", namespace : String | Nil = nil, values : String | Nil = nil)
    helm = BinarySingleton.helm
    cmd = "#{helm} template"
    if namespace != nil
      cmd = "#{cmd} -n #{namespace}"
    end
    cmd = "#{cmd} #{release_name} #{values} #{helm_chart_or_directory} > #{output_file}"
    Log.info { "helm command: #{cmd}" }
    status = Process.run(cmd,
      shell: true,
      output: output = IO::Memory.new,
      error: stderr = IO::Memory.new)
    Log.info { "Helm.template output: #{output.to_s}" }
    Log.info { "Helm.template stderr: #{stderr.to_s}" }
    {status: status, output: output, error: stderr}
  end
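
  # Example usage of Helm.template (hypothetical; the release, chart, values
  # flag and namespace are illustrative assumptions). The call below would run
  # roughly:
  #   helm template -n cnf-namespace coredns --values overrides.yml stable/coredns > cnfs/temp_template.yml
  #
  #   resp = Helm.template("coredns", "stable/coredns",
  #     output_file: "cnfs/temp_template.yml",
  #     namespace: "cnf-namespace",
  #     values: "--values overrides.yml")
  #   resp[:status].success? # => true if "helm template" rendered the chart
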
  def self.install(release_name : String, helm_chart : String, namespace = nil, values = nil)
    # The way values currently work is that they are combined with the chart
    # (e.g. coredns --values FILENAME.yaml
    # or
    # coredns --set test.value.test=new_value --set test.value.anothertest=new_value)
    # status = Process.run("#{helm} install #{cli}",
    #                      shell: true,
    #                      output: output = IO::Memory.new,
    #                      error: stderr = IO::Memory.new)
    install("#{release_name} #{values} #{helm_chart} #{namespace}")
  end

  def self.install(cli)
    helm = BinarySingleton.helm
    Log.info { "helm command: #{helm} install #{cli}" }
    status = Process.run("#{helm} install #{cli}",
      shell: true,
      output: output = IO::Memory.new,
      error: stderr = IO::Memory.new)
    Log.info { "Helm.install output: #{output.to_s}" }
    Log.info { "Helm.install stderr: #{stderr.to_s}" }
    if CannotReuseReleaseNameError.error_text_content_match?(stderr.to_s)
      raise CannotReuseReleaseNameError.new
    end
    # When calling Helm.install, do not rescue from this error.
    # This helps catch those one-off scenarios when helm install fails.
    #
    # Examples:
    # * https://github.com/helm/helm/issues/10285
    # * Also check the platform observability failure in this build - https://github.com/cncf/cnf-testsuite/runs/5308701193?check_suite_focus=true
    result = InstallationFailed.error_text(stderr.to_s)
    if result
      raise InstallationFailed.new("Helm install error: #{result}")
    end
    {status: status, output: output, error: stderr}
  end
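
  # Example usage of the Helm.install overloads (hypothetical; the release
  # name, chart and flags below are illustrative assumptions). Note that the
  # namespace argument is appended to the CLI string verbatim, so it should
  # include the flag itself:
  #
  #   Helm.install("coredns", "stable/coredns", namespace: "-n cnf-namespace", values: "--values overrides.yml")
  #   # or, using the raw-CLI overload:
  #   Helm.install("coredns --set replicaCount=2 stable/coredns -n cnf-namespace")
  #
  # Both raise CannotReuseReleaseNameError or InstallationFailed on the
  # corresponding helm errors.
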
  def self.uninstall(cli)
    helm = BinarySingleton.helm
    Log.info { "helm command: #{helm} uninstall #{cli}" }
    status = Process.run("#{helm} uninstall #{cli}",
      shell: true,
      output: output = IO::Memory.new,
      error: stderr = IO::Memory.new)
    Log.info { "Helm.uninstall output: #{output.to_s}" }
    Log.info { "Helm.uninstall stderr: #{stderr.to_s}" }
    {status: status, output: output, error: stderr}
  end

  def self.delete(cli)
    helm = BinarySingleton.helm
    Log.info { "helm command: #{helm} delete #{cli}" }
    status = Process.run("#{helm} delete #{cli}",
      shell: true,
      output: output = IO::Memory.new,
      error: stderr = IO::Memory.new)
    Log.info { "Helm.delete output: #{output.to_s}" }
    Log.info { "Helm.delete stderr: #{stderr.to_s}" }
    {status: status, output: output, error: stderr}
  end

  def self.pull(cli)
    helm = BinarySingleton.helm
    Log.info { "helm command: #{helm} pull #{cli}" }
    status = Process.run("#{helm} pull #{cli}",
      shell: true,
      output: output = IO::Memory.new,
      error: stderr = IO::Memory.new)
    Log.info { "Helm.pull output: #{output.to_s}" }
    Log.info { "Helm.pull stderr: #{stderr.to_s}" }
    {status: status, output: output, error: stderr}
  end

  def self.fetch(cli)
    helm = BinarySingleton.helm
    Log.info { "helm command: #{helm} fetch #{cli}" }
    status = Process.run("#{helm} fetch #{cli}",
      shell: true,
      output: output = IO::Memory.new,
      error: stderr = IO::Memory.new)
    Log.info { "Helm.fetch output: #{output.to_s}" }
    Log.info { "Helm.fetch stderr: #{stderr.to_s}" }
    {status: status, output: output, error: stderr}
  end

  class CannotReuseReleaseNameError < Exception
    def self.error_text_content_match?(str : String)
      str.includes? "cannot re-use a name that is still in use"
    end
  end

  class InstallationFailed < Exception
    MESSAGE_REGEX = /Error: INSTALLATION FAILED: (.+)$/

    def self.error_text(str : String) : String?
      result = MESSAGE_REGEX.match(str)
      return result[1] if result
      return nil
    end
  end
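
  # Example of InstallationFailed.error_text (hypothetical stderr string,
  # modeled on helm's "Error: INSTALLATION FAILED: ..." output format):
  #
  #   stderr = "Error: INSTALLATION FAILED: chart \"nonexistent\" not found"
  #   InstallationFailed.error_text(stderr) # => "chart \"nonexistent\" not found"
  #   InstallationFailed.error_text("")     # => nil
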
  class ManifestGenerationError < Exception
    def initialize(stderr : String)
      super("✖ ERROR: generating the manifest was not successful.\nHelm stderr --> #{stderr}")
    end
  end
end