#! /bin/bash
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs inside a Prow job. It can run unit tests ("make test")
# and E2E testing. This E2E testing covers different scenarios (see
# https://github.com/kubernetes/enhancements/pull/807):
# - running the stable hostpath example against a Kubernetes release
# - running the canary hostpath example against a Kubernetes release
# - building the component in the current repo and running the
# stable hostpath example with that one component replaced against
# a Kubernetes release
#
# The intended usage of this script is that individual repos import
# csi-release-tools, then link their top-level prow.sh to this or
# include it in that file. When including it, several of the variables
# can be overridden in the top-level prow.sh to customize the script
# for the repo.
#
# The expected environment is:
# - $GOPATH/src/<import path> for the repository that is to be tested,
# with PR branch merged (when testing a PR)
# - running on linux-amd64
# - kind (https://github.com/kubernetes-sigs/kind) installed
# - optional: Go already installed
RELEASE_TOOLS_ROOT="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
REPO_DIR="$(pwd)"
# Sets the default value for a variable if not set already and logs the value.
# Any variable set this way is usually something that a repo's .prow.sh
# or the job can set.
configvar () {
# Ignore: Word is of the form "A"B"C" (B indicated). Did you mean "ABC" or "A\"B\"C"?
# shellcheck disable=SC2140
eval : \$\{"$1":="\$2"\}
eval echo "\$3:" "$1=\${$1}"
}
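# Illustration only (not executed): a repo's .prow.sh or the Prow job can
# pre-set a variable before this file runs; configvar then keeps that value
# and merely logs it. For example (hypothetical invocation):
#   CSI_PROW_KUBERNETES_VERSION=1.21.0 ./.prow.sh
# would log "Kubernetes: CSI_PROW_KUBERNETES_VERSION=1.21.0" instead of
# using the default defined further below.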
# Prints the value of a variable + version suffix, falling back to variable + "LATEST".
get_versioned_variable () {
local var="$1"
local version="$2"
local value
eval value="\${${var}_${version}}"
if ! [ "$value" ]; then
eval value="\${${var}_LATEST}"
fi
echo "$value"
}
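# Example of the lookup, assuming the suffix "1_22" derived below:
#   get_versioned_variable CSI_PROW_E2E_FOCUS 1_22
# prints ${CSI_PROW_E2E_FOCUS_1_22} if that is set and non-empty,
# otherwise ${CSI_PROW_E2E_FOCUS_LATEST}.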
# This takes a version string like CSI_PROW_KUBERNETES_VERSION and
# maps it to the corresponding git tag, branch or commit.
version_to_git () {
version="$1"
shift
case "$version" in
latest|master) echo "master";;
release-*) echo "$version";;
*) echo "v$version";;
esac
}
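# Examples: "latest" and "master" map to "master", "release-1.22" is
# passed through unchanged, and a plain version number like "1.22.0"
# becomes the tag "v1.22.0".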
# the list of windows versions was matched from:
# - https://hub.docker.com/_/microsoft-windows-nanoserver
# - https://hub.docker.com/_/microsoft-windows-servercore
configvar CSI_PROW_BUILD_PLATFORMS "linux amd64 amd64; linux ppc64le ppc64le -ppc64le; linux s390x s390x -s390x; linux arm arm -arm; linux arm64 arm64 -arm64; linux arm arm/v7 -armv7; windows amd64 amd64 .exe nanoserver:1809 servercore:ltsc2019; windows amd64 amd64 .exe nanoserver:ltsc2022 servercore:ltsc2022" "Go target platforms (= GOOS + GOARCH) and file suffix of the resulting binaries"
# If we have a vendor directory, then use it. We must be careful to only
# use this for "make" invocations inside the project's repo itself because
# setting it globally can break other go usages (like "go get <some command>"
# which is disabled with GOFLAGS=-mod=vendor).
configvar GOFLAGS_VENDOR "$( [ -d vendor ] && echo '-mod=vendor' )" "Go flags for using the vendor directory"
configvar CSI_PROW_GO_VERSION_BUILD "1.23.1" "Go version for building the component" # depends on component's source code
configvar CSI_PROW_GO_VERSION_E2E "" "override Go version for building the Kubernetes E2E test suite" # normally doesn't need to be set, see install_e2e
configvar CSI_PROW_GO_VERSION_SANITY "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building the csi-sanity test suite" # depends on CSI_PROW_SANITY settings below
configvar CSI_PROW_GO_VERSION_KIND "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building 'kind'" # depends on CSI_PROW_KIND_VERSION below
configvar CSI_PROW_GO_VERSION_GINKGO "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building ginkgo" # depends on CSI_PROW_GINKGO_VERSION below
# ginkgo test runner version to use. If the pre-installed version is
# different, the desired version is built from source. For Kubernetes,
# the version built via "make WHAT=vendor/github.com/onsi/ginkgo/ginkgo" is
# used, which is guaranteed to match what the Kubernetes e2e.test binary
# needs.
configvar CSI_PROW_GINKGO_VERSION v1.7.0 "Ginkgo"
# Ginkgo runs the E2E test in parallel. The default is based on the number
# of CPUs, but typically this can be set to something higher in the job.
configvar CSI_PROW_GINKGO_PARALLEL "-p" "Ginkgo parallelism parameter(s)"
# Timeout value for the overall ginkgo test suite.
configvar CSI_PROW_GINKGO_TIMEOUT "1h" "Ginkgo timeout"
# Enables building the code in the repository. On by default, can be
# disabled in jobs which only use pre-built components.
configvar CSI_PROW_BUILD_JOB true "building code in repo enabled"
# Kubernetes version to test against. This must be a version number
# (like 1.13.3), "latest" (builds Kubernetes from the master branch)
# or "release-x.yy" (builds Kubernetes from a release branch).
#
# The patch version is only relevant for picking the E2E test suite
# that is used for testing. The script automatically picks
# the kind images for the major/minor version of Kubernetes
# that the kind release supports.
#
# This can also be a version that was not released yet at the time
# that the settings below were chosen. The script will then
# use the same settings as for "latest" Kubernetes. This works
# as long as there are no breaking changes in Kubernetes, like
# deprecating or changing the implementation of an alpha feature.
configvar CSI_PROW_KUBERNETES_VERSION 1.22.0 "Kubernetes"
# CSI_PROW_KUBERNETES_VERSION reduced to first two version numbers and
# with underscore (1_13 instead of 1.13.3) and in uppercase (LATEST
# instead of latest).
#
# This is used to derive the right defaults for the variables below
# when a Prow job just defines the Kubernetes version.
csi_prow_kubernetes_version_suffix="$(echo "${CSI_PROW_KUBERNETES_VERSION}" | tr . _ | tr '[:lower:]' '[:upper:]' | sed -e 's/^RELEASE-//' -e 's/\([0-9]*\)_\([0-9]*\).*/\1_\2/')"
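# For example: "1.22.0" becomes "1_22", "release-1.22" also becomes
# "1_22", and "latest" becomes "LATEST".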
# Only the latest KinD is (eventually) guaranteed to work with the
# latest Kubernetes. For example, KinD 0.10.0 failed with Kubernetes
# 1.21.0-beta1. Therefore the default version of KinD is "main" in
# that case; otherwise it is the latest stable release, for which the
# officially supported images are listed below.
kind_version_default () {
case "${CSI_PROW_KUBERNETES_VERSION}" in
latest|master)
echo main;;
*)
echo v0.14.0;;
esac
}
# kind version to use. If the pre-installed version is different,
# the desired version is downloaded from https://github.com/kubernetes-sigs/kind/releases
# (if available), otherwise it is built from source.
configvar CSI_PROW_KIND_VERSION "$(kind_version_default)" "kind"
# kind images to use. Must match the kind version.
# The release notes of each kind release list the supported images.
configvar CSI_PROW_KIND_IMAGES "kindest/node:v1.24.0@sha256:0866296e693efe1fed79d5e6c7af8df71fc73ae45e3679af05342239cdc5bc8e
kindest/node:v1.23.6@sha256:b1fa224cc6c7ff32455e0b1fd9cbfd3d3bc87ecaa8fcb06961ed1afb3db0f9ae
kindest/node:v1.22.9@sha256:8135260b959dfe320206eb36b3aeda9cffcb262f4b44cda6b33f7bb73f453105
kindest/node:v1.21.12@sha256:f316b33dd88f8196379f38feb80545ef3ed44d9197dca1bfd48bcb1583210207
kindest/node:v1.20.15@sha256:6f2d011dffe182bad80b85f6c00e8ca9d86b5b8922cdf433d53575c4c5212248
kindest/node:v1.19.16@sha256:d9c819e8668de8d5030708e484a9fdff44d95ec4675d136ef0a0a584e587f65c
kindest/node:v1.18.20@sha256:738cdc23ed4be6cc0b7ea277a2ebcc454c8373d7d8fb991a7fcdbd126188e6d7" "kind images"
# By default, this script tests sidecars with the CSI hostpath driver,
# using the install_csi_driver function. That function depends on
# a deployment script that it searches for in several places:
#
# - The "deploy" directory in the current repository: this is useful
# for the situation that a component becomes incompatible with the
# shared deployment, because then it can (temporarily!) provide its
# own example until the shared one can be updated; it's also how
# csi-driver-host-path itself provides the example.
#
# - CSI_PROW_DRIVER_VERSION of the CSI_PROW_DRIVER_REPO is checked
# out: this allows other repos to reference a version of the example
# that is known to be compatible.
#
# - The <driver repo>/deploy directory can have multiple sub-directories,
# each with different deployments (stable set of images for Kubernetes 1.13,
# stable set of images for Kubernetes 1.14, canary for latest Kubernetes, etc.).
# This is necessary because there may be incompatible changes in the
# "API" of a component (for example, its command line options or RBAC rules)
# or in its support for different Kubernetes versions (CSIDriverInfo as
# CRD in Kubernetes 1.13 vs builtin API in Kubernetes 1.14).
#
# When testing an update for a component in a PR job, the
# CSI_PROW_DEPLOYMENT variable can be set in the
# .prow.sh of each component when there are breaking changes
# that require using a non-default deployment. The default
# is a deployment named "kubernetes-x.yy${CSI_PROW_DEPLOYMENT_SUFFIX}" (if available),
# otherwise "kubernetes-latest${CSI_PROW_DEPLOYMENT_SUFFIX}".
# "none" disables the deployment of the hostpath driver.
#
# When no deploy script is found (nothing in `deploy` directory,
# CSI_PROW_DRIVER_REPO=none), nothing gets deployed.
#
# If the deployment script is called with CSI_PROW_TEST_DRIVER=<file name> as
# environment variable, then it must write a suitable test driver configuration
# into that file in addition to installing the driver.
configvar CSI_PROW_DRIVER_VERSION "v1.15.0" "CSI driver version"
configvar CSI_PROW_DRIVER_REPO https://github.com/kubernetes-csi/csi-driver-host-path "CSI driver repo"
configvar CSI_PROW_DEPLOYMENT "" "deployment"
configvar CSI_PROW_DEPLOYMENT_SUFFIX "" "additional suffix in kubernetes-x.yy[suffix].yaml files"
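# Illustration only (hypothetical override): a component whose PR is
# incompatible with the shared deployment could temporarily pin
#   CSI_PROW_DEPLOYMENT=kubernetes-1.22-test
# in its .prow.sh; find_deployment below then looks for
# deploy/kubernetes-1.22-test/deploy.sh first and falls back to
# deploy/kubernetes-latest-test/deploy.sh.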
# The install_csi_driver function may work also for other CSI drivers,
# as long as they follow the conventions of the CSI hostpath driver.
# If they don't, then a different install function can be provided in
# a .prow.sh file and this config variable can be overridden.
configvar CSI_PROW_DRIVER_INSTALL "install_csi_driver" "name of the shell function which installs the CSI driver"
# If CSI_PROW_DRIVER_CANARY is set (typically to "canary", but a
# specific version tag also works), then the image versions of the
# deployed CSI driver are replaced with that tag. Usually empty.
# CSI_PROW_HOSTPATH_CANARY is accepted as alternative name because
# some test-infra jobs still use that name.
configvar CSI_PROW_DRIVER_CANARY "${CSI_PROW_HOSTPATH_CANARY}" "driver image override for canary images"
# Image registry to use for canary images.
# Only valid if CSI_PROW_DRIVER_CANARY == "canary".
configvar CSI_PROW_DRIVER_CANARY_REGISTRY "gcr.io/k8s-staging-sig-storage" "registry for canary images"
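# Illustration only (hypothetical job setting): a canary job would set
#   CSI_PROW_DRIVER_CANARY=canary
# which makes install_csi_driver pass IMAGE_TAG=canary and
# IMAGE_REGISTRY=${CSI_PROW_DRIVER_CANARY_REGISTRY} to the deploy script.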
# The E2E testing can come from an arbitrary repo. The expectation is that
# the repo supports "go test ./test/e2e -args --storage.testdriver" (https://github.com/kubernetes/kubernetes/pull/72836)
# after setting KUBECONFIG. As a special case, if the repository is Kubernetes,
# then `make WHAT=test/e2e/e2e.test` is called first to ensure that
# all generated files are present.
#
# CSI_PROW_E2E_REPO=none disables E2E testing.
configvar CSI_PROW_E2E_VERSION "$(version_to_git "${CSI_PROW_KUBERNETES_VERSION}")" "E2E version"
configvar CSI_PROW_E2E_REPO "https://github.com/kubernetes/kubernetes" "E2E repo"
configvar CSI_PROW_E2E_IMPORT_PATH "k8s.io/kubernetes" "E2E package"
# Local path & package path for e2e tests. Set to "none" to disable.
# When using versioned go modules, the import path is the module path whereas the path
# should not contain the version and be the directory where the module is checked out.
configvar CSI_PROW_SIDECAR_E2E_IMPORT_PATH "none" "CSI Sidecar E2E package (go import path)"
configvar CSI_PROW_SIDECAR_E2E_PATH "${CSI_PROW_SIDECAR_E2E_IMPORT_PATH}" "CSI Sidecar E2E path (directory)"
# csi-sanity testing from the csi-test repo can be run against the installed
# CSI driver. For this to work, deploying the driver must expose the Unix domain
# socket csi.sock as a TCP service for use by the csi-sanity command, which runs outside
# of the cluster. The alternative would have been to (cross-)compile csi-sanity
# and install it inside the cluster, which is not necessarily easier.
configvar CSI_PROW_SANITY_REPO https://github.com/kubernetes-csi/csi-test "csi-test repo"
configvar CSI_PROW_SANITY_VERSION v5.3.1 "csi-test version"
configvar CSI_PROW_SANITY_PACKAGE_PATH github.com/kubernetes-csi/csi-test "csi-test package"
configvar CSI_PROW_SANITY_SERVICE "hostpath-service" "Kubernetes TCP service name that exposes csi.sock"
configvar CSI_PROW_SANITY_POD "csi-hostpathplugin-0" "Kubernetes pod with CSI driver"
configvar CSI_PROW_SANITY_CONTAINER "hostpath" "Kubernetes container with CSI driver"
# The version of dep to use for 'make test-vendor'. Ignored if the project doesn't
# use dep. Only binary releases of dep are supported (https://github.com/golang/dep/releases).
configvar CSI_PROW_DEP_VERSION v0.5.4 "golang dep version to be used for vendor checking"
# Each job can run one or more of the following tests, identified by
# a single word:
# - unit: unit testing ("make test")
# - parallel: parallel E2E tests, excluding alpha features
# - serial: serial E2E tests, excluding alpha features
# - parallel-alpha: parallel E2E tests, only alpha features
# - serial-alpha: serial E2E tests, only alpha features
# - sanity: csi-sanity testing of the installed CSI driver
#
# Unknown or unsupported entries are ignored.
#
# Testing of alpha features is only supported for CSI_PROW_KUBERNETES_VERSION=latest
# because CSI_PROW_E2E_ALPHA and CSI_PROW_E2E_ALPHA_GATES are not set for
# older Kubernetes releases. The script supports that; it just isn't done because
# it is not needed and would cause additional maintenance effort.
#
# Sanity testing with csi-sanity only covers the CSI driver itself and
# thus only makes sense in repos which provide their own CSI
# driver. Repos can enable sanity testing by setting
# CSI_PROW_TESTS_SANITY=sanity.
configvar CSI_PROW_TESTS "unit parallel serial $(if [ "${CSI_PROW_KUBERNETES_VERSION}" = "latest" ]; then echo parallel-alpha serial-alpha; fi) sanity" "tests to run"
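# Illustration only (hypothetical job setting): a job that only wants unit
# tests plus the parallel non-alpha E2E suite would set
#   CSI_PROW_TESTS="unit parallel"
# before sourcing this file; tests_enabled below simply checks membership
# in that space-separated list.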
tests_enabled () {
local t1 t2
# We want word-splitting here, so ignore: Quote to prevent word splitting, or split robustly with mapfile or read -a.
# shellcheck disable=SC2206
local tests=(${CSI_PROW_TESTS})
for t1 in "$@"; do
for t2 in "${tests[@]}"; do
if [ "$t1" = "$t2" ]; then
return
fi
done
done
return 1
}
sanity_enabled () {
[ "${CSI_PROW_TESTS_SANITY}" = "sanity" ] && tests_enabled "sanity"
}
sidecar_tests_enabled () {
[ "${CSI_PROW_SIDECAR_E2E_IMPORT_PATH}" != "none" ]
}
tests_need_kind () {
tests_enabled "parallel" "serial" "serial-alpha" "parallel-alpha" ||
sanity_enabled || sidecar_tests_enabled
}
tests_need_non_alpha_cluster () {
tests_enabled "parallel" "serial" ||
sanity_enabled || sidecar_tests_enabled
}
tests_need_alpha_cluster () {
tests_enabled "parallel-alpha" "serial-alpha"
}
# Enabling mock tests adds the "CSI mock volume" tests from https://github.com/kubernetes/kubernetes/blob/HEAD/test/e2e/storage/csi_mock_volume.go
# to the e2e.test invocations (serial, parallel, and the corresponding alpha variants).
# When testing canary images, those get used instead of the images specified
# in the e2e.test's normal YAML files.
#
# The default is to enable this for all jobs which use canary images
# and the latest Kubernetes because those images will be used for mock
# testing once they are released. Using them for mock testing with
# older Kubernetes releases is too risky because the deployment files
# can be very old (for example, still using a removed -provisioner
# parameter in external-provisioner).
configvar CSI_PROW_E2E_MOCK "$(if [ "${CSI_PROW_DRIVER_CANARY}" = "canary" ] && [ "${CSI_PROW_KUBERNETES_VERSION}" = "latest" ]; then echo true; else echo false; fi)" "enable CSI mock volume tests"
# Regex for non-alpha, feature-tagged tests that should be run.
#
configvar CSI_PROW_E2E_FOCUS_LATEST '\[Feature:VolumeSnapshotDataSource\]' "non-alpha, feature-tagged tests for latest Kubernetes version"
configvar CSI_PROW_E2E_FOCUS "$(get_versioned_variable CSI_PROW_E2E_FOCUS "${csi_prow_kubernetes_version_suffix}")" "non-alpha, feature-tagged tests"
# Serial vs. parallel is always determined by these regular expressions.
# Individual regular expressions are separated by spaces for readability
# and expected to not contain spaces. Use dots instead. The complete
# regex for Ginkgo will be created by joining the individual terms.
configvar CSI_PROW_E2E_SERIAL '\[Serial\] \[Disruptive\]' "tags for serial E2E tests"
regex_join () {
echo "$@" | sed -e 's/ */|/g' -e 's/^|*//' -e 's/|*$//' -e 's/^$/this-matches-nothing/g'
}
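# Example: regex_join '\[Serial\]' '\[Disruptive\]' prints
# '\[Serial\]|\[Disruptive\]', while regex_join '' prints
# 'this-matches-nothing' so that an empty term list never matches everything.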
# Which tests are alpha depends on the Kubernetes version. We could
# use the same E2E tests for all Kubernetes versions. This would have
# the advantage that new tests can be applied to older versions
# without having to backport tests.
#
# But the feature tag gets removed from E2E tests when the corresponding
# feature becomes beta, so we would have to track which tests were
# alpha in previous Kubernetes releases. This was considered too
# error prone. Therefore we use E2E tests that match the Kubernetes
# version that is getting tested.
configvar CSI_PROW_E2E_ALPHA_LATEST '\[Feature:' "alpha tests for latest Kubernetes version" # there's no need to update this, adding a new case for CSI_PROW_E2E for a new Kubernetes is enough
configvar CSI_PROW_E2E_ALPHA "$(get_versioned_variable CSI_PROW_E2E_ALPHA "${csi_prow_kubernetes_version_suffix}")" "alpha tests"
# After the parallel E2E test without alpha features, a test cluster
# with alpha features is brought up and tests that were previously
# disabled are run. The alpha gates in each release have to be listed
# explicitly. If none are set (= variable empty), alpha testing
# is skipped.
#
# Testing against "latest" Kubernetes is problematic because some alpha
# feature which used to work might stop working or change their behavior
# such that the current tests no longer pass. If that happens,
# kubernetes-csi components must be updated, either by disabling
# the failing test for "latest" or by updating the test and not running
# it anymore for older releases.
configvar CSI_PROW_E2E_ALPHA_GATES_LATEST '' "alpha feature gates for latest Kubernetes"
configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates"
configvar CSI_PROW_E2E_GATES_LATEST '' "non alpha feature gates for latest Kubernetes"
configvar CSI_PROW_E2E_GATES "$(get_versioned_variable CSI_PROW_E2E_GATES "${csi_prow_kubernetes_version_suffix}")" "non alpha E2E feature gates"
# Focus for local tests run in the sidecar E2E repo. Only used if CSI_PROW_SIDECAR_E2E_IMPORT_PATH
# is not set to "none". If empty, all tests in the sidecar repo will be run.
configvar CSI_PROW_SIDECAR_E2E_FOCUS '' "tags for local E2E tests"
configvar CSI_PROW_SIDECAR_E2E_SKIP '' "local tests that need to be skipped"
# Which external-snapshotter tag to use for the snapshotter CRD and snapshot-controller deployment
default_csi_snapshotter_version () {
if [ "${CSI_PROW_KUBERNETES_VERSION}" = "latest" ] || [ "${CSI_PROW_DRIVER_CANARY}" = "canary" ]; then
echo "master"
else
echo "v4.0.0"
fi
}
configvar CSI_SNAPSHOTTER_VERSION "$(default_csi_snapshotter_version)" "external-snapshotter version tag"
# Some tests are known to be unusable in a KinD cluster. For example,
# stopping kubelet with "ssh <node IP> systemctl stop kubelet" simply
# doesn't work. Such tests should be written in a way that they verify
# whether they can run with the current cluster provider, but until
# they are, we filter them out by name. Like the other test selection
# variables, this is again a space separated list of regular expressions.
configvar CSI_PROW_E2E_SKIP '\[Disruptive\]|\[Feature:SELinux\]' "tests that need to be skipped"
# This creates directories that are required for testing.
ensure_paths () {
# Work directory. It has to allow running executables, therefore /tmp
# is avoided. Cleaning up after the script is intentionally left to
# the caller.
configvar CSI_PROW_WORK "$(mkdir -p "$GOPATH/pkg" && mktemp -d "$GOPATH/pkg/csiprow.XXXXXXXXXX")" "work directory"
# This is the directory for additional result files. Usually set by Prow, but
# if not (for example, when invoking manually) it defaults to the work directory.
configvar ARTIFACTS "${CSI_PROW_WORK}/artifacts" "artifacts"
mkdir -p "${ARTIFACTS}"
# For additional tools.
CSI_PROW_BIN="${CSI_PROW_WORK}/bin"
mkdir -p "${CSI_PROW_BIN}"
PATH="${CSI_PROW_BIN}:$PATH"
}
run () {
echo "$(date) $(go version | sed -e 's/.*version \(go[^ ]*\).*/\1/') $(if [ "$(pwd)" != "${REPO_DIR}" ]; then pwd; fi)\$" "$@" >&2
"$@"
}
info () {
echo >&2 INFO: "$@"
}
warn () {
echo >&2 WARNING: "$@"
}
die () {
echo >&2 ERROR: "$@"
exit 1
}
# Ensure we use the desired version of the Go tools, then run command given as argument.
# Empty parameter uses the already installed Go. In Prow, that version is kept up-to-date by
# bumping the container image regularly.
run_with_go () {
local version
version="$1"
shift
if [ "$version" ]; then
version=go$version
if [ "$(GOTOOLCHAIN=$version go version | cut -d' ' -f3)" != "$version" ]; then
die "Please install Go 1.21+"
fi
else
version=local
fi
# Set GOMODCACHE to make sure Kubernetes does not need to download again.
GOTOOLCHAIN=$version GOMODCACHE="$(go env GOMODCACHE)" run "$@"
}
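# Illustration only (not executed):
#   run_with_go 1.23.1 go build ./...
# selects the Go 1.23.1 toolchain via GOTOOLCHAIN (requires a local Go 1.21+),
# whereas
#   run_with_go "" go build ./...
# uses whatever Go toolchain is already installed.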
# Ensure that we have the desired version of kind.
install_kind () {
if kind --version 2>/dev/null | grep -q " ${CSI_PROW_KIND_VERSION}$"; then
return
fi
if run curl --fail --location -o "${CSI_PROW_WORK}/bin/kind" "https://github.com/kubernetes-sigs/kind/releases/download/${CSI_PROW_KIND_VERSION}/kind-linux-amd64"; then
chmod u+x "${CSI_PROW_WORK}/bin/kind"
else
git_checkout https://github.com/kubernetes-sigs/kind "${GOPATH}/src/sigs.k8s.io/kind" "${CSI_PROW_KIND_VERSION}" --depth=1 &&
(cd "${GOPATH}/src/sigs.k8s.io/kind" && run_with_go "$CSI_PROW_GO_VERSION_KIND" make install INSTALL_DIR="${CSI_PROW_WORK}/bin")
fi
}
# Ensure that we have the desired version of the ginkgo test runner.
install_ginkgo () {
if [ -e "${CSI_PROW_BIN}/ginkgo" ]; then
return
fi
# CSI_PROW_GINKGO_VERSION contains the tag with a "v" prefix; the command line output does not.
if [ "v$(ginkgo version 2>/dev/null | sed -e 's/.* //')" = "${CSI_PROW_GINKGO_VERSION}" ]; then
return
fi
run_with_go "${CSI_PROW_GO_VERSION_GINKGO}" env GOBIN="${CSI_PROW_BIN}" go install "github.com/onsi/ginkgo/ginkgo@${CSI_PROW_GINKGO_VERSION}" || die "building ginkgo failed"
}
# Ensure that we have the desired version of dep.
install_dep () {
if dep version 2>/dev/null | grep -q "version:.*${CSI_PROW_DEP_VERSION}$"; then
return
fi
run curl --fail --location -o "${CSI_PROW_WORK}/bin/dep" "https://github.com/golang/dep/releases/download/${CSI_PROW_DEP_VERSION}/dep-linux-amd64" &&
chmod u+x "${CSI_PROW_WORK}/bin/dep"
}
# This checks out a repo ("https://github.com/kubernetes/kubernetes")
# in a certain location ("$GOPATH/src/k8s.io/kubernetes") at
# a certain revision (a hex commit hash, v1.13.1, master). It's okay
# for that directory to exist already.
git_checkout () {
local repo path revision
repo="$1"
shift
path="$1"
shift
revision="$1"
shift
mkdir -p "$path"
if ! [ -d "$path/.git" ]; then
run git init "$path"
fi
if (cd "$path" && run git fetch "$@" "$repo" "$revision"); then
(cd "$path" && run git checkout FETCH_HEAD) || die "checking out $repo $revision failed"
else
# Might have been because fetching by revision is not
# supported by GitHub (https://github.com/isaacs/github/issues/436).
# Fall back to fetching everything.
(cd "$path" && run git fetch "$repo" '+refs/heads/*:refs/remotes/csiprow/heads/*' '+refs/tags/*:refs/tags/*') || die "fetching $repo failed"
(cd "$path" && run git checkout "$revision") || die "checking out $repo $revision failed"
fi
# This is useful for local testing or when switching between different revisions in the same
# repo.
(cd "$path" && run git clean -fdx) || die "failed to clean $path"
}
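# Illustration only (not executed): the function works for any repo and
# revision, e.g.
#   git_checkout https://github.com/kubernetes/kubernetes \
#       "$GOPATH/src/k8s.io/kubernetes" v1.22.0 --depth=1
# which fetches just that revision when possible and otherwise falls back
# to fetching all branches and tags.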
# This clones a repo ("https://github.com/kubernetes/kubernetes")
# in a certain location ("$GOPATH/src/k8s.io/kubernetes") at
# the head of a specific branch (e.g., release-1.13, master),
# tag (v1.20.0) or commit.
#
# The directory must not exist.
git_clone () {
local repo path name parent
repo="$1"
shift
path="$1"
shift
name="$1"
shift
parent="$(dirname "$path")"
mkdir -p "$parent"
(cd "$parent" && run git clone --single-branch --branch "$name" "$repo" "$path") || die "cloning $repo" failed
# This is useful for local testing or when switching between different revisions in the same
# repo.
(cd "$path" && run git clean -fdx) || die "failed to clean $path"
}
list_gates () (
set -f; IFS=','
# Ignore: Double quote to prevent globbing and word splitting.
# shellcheck disable=SC2086
set -- $1
while [ "$1" ]; do
# Ignore: See if you can use ${variable//search/replace} instead.
# shellcheck disable=SC2001
echo "$1" | sed -e 's/ *\([^ =]*\) *= *\([^ ]*\) */ \1: \2/'
shift
done
)
# Turn feature gates in the format foo=true,bar=false into
# a YAML map with the corresponding API groups for use
# with https://kind.sigs.k8s.io/docs/user/configuration/#runtime-config
list_api_groups () (
set -f; IFS=','
# Ignore: Double quote to prevent globbing and word splitting.
# shellcheck disable=SC2086
set -- $1
while [ "$1" ]; do
if [ "$1" = 'CSIStorageCapacity=true' ]; then
echo ' "storage.k8s.io/v1alpha1": "true"'
fi
shift
done
)
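# Example (hypothetical gate list): for "CSIStorageCapacity=true,Foo=false",
# list_gates prints one indented "name: value" line per gate
# (CSIStorageCapacity: true, Foo: false) for the kind configuration,
# and list_api_groups additionally enables the storage.k8s.io/v1alpha1
# API group because CSIStorageCapacity=true is present.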
go_version_for_kubernetes () (
local path="$1"
local version="$2"
local go_version
# Try to get the version from .go-version
go_version="$( cat "$path/.go-version" )"
if [ "$go_version" ]; then
echo "$go_version"
return
fi
# Fall back to hack/lib/golang.sh parsing.
# This is necessary in v1.26.0 and older Kubernetes releases that do not have .go-version.
# More recent versions might also work, but we don't want to count on that.
go_version="$(grep minimum_go_version= "$path/hack/lib/golang.sh" | sed -e 's/.*=go//')"
if ! [ "$go_version" ]; then
die "Unable to determine Go version for Kubernetes $version from hack/lib/golang.sh."
fi
# Strip the trailing .0. Kubernetes includes it, Go itself doesn't.
# Ignore: See if you can use ${variable//search/replace} instead.
# shellcheck disable=SC2001
go_version="$(echo "$go_version" | sed -e 's/\.0$//')"
echo "$go_version"
)
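# For example (hypothetical values): a checkout whose .go-version file
# contains "1.22.5" yields "1.22.5"; an older checkout without that file
# but with "minimum_go_version=go1.16.0" in hack/lib/golang.sh yields "1.16".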
csi_prow_kind_have_kubernetes=false
# Brings up a Kubernetes cluster and sets KUBECONFIG.
# Accepts additional feature gates in the form gate1=true|false,gate2=...
start_cluster () {
local image gates
gates="$1"
if kind get clusters | grep -q csi-prow; then
run kind delete cluster --name=csi-prow || die "kind delete failed"
fi
# Try to find a pre-built kind image if asked to use a specific version.
if ! [[ "${CSI_PROW_KUBERNETES_VERSION}" =~ ^release-|^latest$ ]]; then
# Ignore: See if you can use ${variable//search/replace} instead.
# shellcheck disable=SC2001
major_minor=$(echo "${CSI_PROW_KUBERNETES_VERSION}" | sed -e 's/^\([0-9]*\)\.\([0-9]*\).*/\1.\2/')
for i in ${CSI_PROW_KIND_IMAGES}; do
if echo "$i" | grep -q "kindest/node:v${major_minor}"; then
image="$i"
break
fi
done
fi
# Need to build from source?
if ! [ "$image" ]; then
if ! ${csi_prow_kind_have_kubernetes}; then
local version="${CSI_PROW_KUBERNETES_VERSION}"
if [ "$version" = "latest" ]; then
version=master
fi
git_clone https://github.com/kubernetes/kubernetes "${CSI_PROW_WORK}/src/kubernetes" "$(version_to_git "$version")" || die "checking out Kubernetes $version failed"
go_version="$(go_version_for_kubernetes "${CSI_PROW_WORK}/src/kubernetes" "$version")" || die "cannot proceed without knowing Go version for Kubernetes"
# Changing into the Kubernetes source code directory is a workaround for https://github.com/kubernetes-sigs/kind/issues/1910
# shellcheck disable=SC2046
(cd "${CSI_PROW_WORK}/src/kubernetes" && run_with_go "$go_version" kind build node-image "${CSI_PROW_WORK}/src/kubernetes" --image csiprow/node:latest) || die "'kind build node-image' failed"
csi_prow_kind_have_kubernetes=true
fi
image="csiprow/node:latest"
fi
cat >"${CSI_PROW_WORK}/kind-config.yaml" <<EOF
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
- role: worker
featureGates:
$(list_gates "$gates")
runtimeConfig:
$(list_api_groups "$gates")
EOF
info "kind-config.yaml:"
cat "${CSI_PROW_WORK}/kind-config.yaml"
if ! run kind create cluster --name csi-prow --config "${CSI_PROW_WORK}/kind-config.yaml" --wait 5m --image "$image"; then
warn "Cluster creation failed. Will try again with higher verbosity."
info "Available Docker images:"
docker image ls
if ! run kind --loglevel debug create cluster --retain --name csi-prow --config "${CSI_PROW_WORK}/kind-config.yaml" --wait 5m --image "$image"; then
run kind export logs --name csi-prow "$ARTIFACTS/kind-cluster"
die "Cluster creation failed again, giving up. See the 'kind-cluster' artifact directory for additional logs."
fi
fi
export KUBECONFIG="${HOME}/.kube/config"
}
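# Illustration only (not executed): a typical invocation is
#   start_cluster
# for a cluster without extra feature gates, or
#   start_cluster "${CSI_PROW_E2E_ALPHA_GATES}"
# for one with the alpha feature gates enabled.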
# Deletes kind cluster inside a prow job
delete_cluster_inside_prow_job() {
local name="$1"
# Inside a real Prow job it is better to clean up at runtime
# instead of leaving that to the Prow job cleanup code
# because the latter sometimes times out (https://github.com/kubernetes-csi/csi-release-tools/issues/24#issuecomment-554765872).
#
# This is also a good time to collect logs.
if [ "$JOB_NAME" ]; then
if kind get clusters | grep -q csi-prow; then
run kind export logs --name=csi-prow "${ARTIFACTS}/cluster-logs/$name"
run kind delete cluster --name=csi-prow || die "kind delete failed"
fi
unset KUBECONFIG
fi
}
# Looks for the deployment as specified by CSI_PROW_DEPLOYMENT and CSI_PROW_KUBERNETES_VERSION
# in the given directory.
find_deployment () {
local dir="$1"
local file
# major/minor without release- prefix.
local k8sver
# Ignore: See if you can use ${variable//search/replace} instead.
# shellcheck disable=SC2001
k8sver="$(echo "${CSI_PROW_KUBERNETES_VERSION}" | sed -e 's/^release-//' -e 's/\([0-9]*\)\.\([0-9]*\).*/\1.\2/')"
# Desired deployment, either specified completely, including version, or derived from other variables.
local deployment
deployment=${CSI_PROW_DEPLOYMENT:-kubernetes-${k8sver}${CSI_PROW_DEPLOYMENT_SUFFIX}}
# Fixed deployment name? Use it if it exists.
if [ "${CSI_PROW_DEPLOYMENT}" ]; then
file="$dir/${CSI_PROW_DEPLOYMENT}/deploy.sh"
if [ -e "$file" ]; then
echo "$file"
return 0
fi
# CSI_PROW_DEPLOYMENT=kubernetes-x.yy must be mapped to kubernetes-latest
# as fallback. Same for kubernetes-distributed-x.yy.
fi
file="$dir/${deployment}/deploy.sh"
if ! [ -e "$file" ]; then
# Replace the first xx.yy number with "latest", for example
# kubernetes-1.21-test -> kubernetes-latest-test.
# Ignore: See if you can use ${variable//search/replace} instead.
# shellcheck disable=SC2001
file="$dir/$(echo "$deployment" | sed -e 's/[0-9][0-9]*\.[0-9][0-9]*/latest/')/deploy.sh"
if ! [ -e "$file" ]; then
return 1
fi
fi
echo "$file"
}
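# Illustration of the search order, assuming CSI_PROW_KUBERNETES_VERSION=1.22.0
# and CSI_PROW_DEPLOYMENT unset: find_deployment "$(pwd)/deploy" first looks
# for deploy/kubernetes-1.22/deploy.sh, then for deploy/kubernetes-latest/deploy.sh,
# and prints the first one that exists.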
# This installs the CSI driver. It's called with a list of env variables
# that override the default images. CSI_PROW_DRIVER_CANARY overrides all
# image versions with that canary version.
install_csi_driver () {
local images deploy_driver
images="$*"
if [ "${CSI_PROW_DEPLOYMENT}" = "none" ]; then
return 1
fi
if ${CSI_PROW_BUILD_JOB}; then
# Ignore: Double quote to prevent globbing and word splitting.
# Ignore: To read lines rather than words, pipe/redirect to a 'while read' loop.
# shellcheck disable=SC2086 disable=SC2013
for i in $(grep '^\s*CMDS\s*=' Makefile | sed -e 's/\s*CMDS\s*=//'); do
kind load docker-image --name csi-prow $i:csiprow || die "could not load the $i:csiprow image into the kind cluster"
done
fi
if deploy_driver="$(find_deployment "$(pwd)/deploy")"; then
:
elif [ "${CSI_PROW_DRIVER_REPO}" = "none" ]; then
return 1
else
git_checkout "${CSI_PROW_DRIVER_REPO}" "${CSI_PROW_WORK}/csi-driver" "${CSI_PROW_DRIVER_VERSION}" --depth=1 || die "checking out CSI driver repo failed"
if deploy_driver="$(find_deployment "${CSI_PROW_WORK}/csi-driver/deploy")"; then
:
else
die "deploy.sh not found in ${CSI_PROW_DRIVER_REPO} ${CSI_PROW_DRIVER_VERSION}. To disable E2E testing, set CSI_PROW_DRIVER_REPO=none"
fi
fi
if [ "${CSI_PROW_DRIVER_CANARY}" != "stable" ]; then
if [ "${CSI_PROW_DRIVER_CANARY}" == "canary" ]; then
images="$images IMAGE_TAG=${CSI_PROW_DRIVER_CANARY} IMAGE_REGISTRY=${CSI_PROW_DRIVER_CANARY_REGISTRY}"
else
images="$images IMAGE_TAG=${CSI_PROW_DRIVER_CANARY}"
fi
fi
# Ignore: Double quote to prevent globbing and word splitting.
# It's intentional here for $images.
# shellcheck disable=SC2086
if ! run env "CSI_PROW_TEST_DRIVER=${CSI_PROW_WORK}/test-driver.yaml" $images "${deploy_driver}"; then
# Collect information about failed deployment before failing.
collect_cluster_info
(start_loggers >/dev/null; wait)
info "For container output see job artifacts."
die "deploying the CSI driver with ${deploy_driver} failed"
fi
}
# Installs all necessary snapshotter CRDs
install_snapshot_crds() {
# Wait until volumesnapshot CRDs are in place.
CRD_BASE_DIR="https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/client/config/crd"
if [[ ${REPO_DIR} == *"external-snapshotter"* ]]; then
CRD_BASE_DIR="${REPO_DIR}/client/config/crd"
fi
echo "Installing snapshot CRDs from ${CRD_BASE_DIR}"
kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshotclasses.yaml" --validate=false
kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshots.yaml" --validate=false
kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshotcontents.yaml" --validate=false
cnt=0
until kubectl get volumesnapshotclasses.snapshot.storage.k8s.io \
&& kubectl get volumesnapshots.snapshot.storage.k8s.io \
&& kubectl get volumesnapshotcontents.snapshot.storage.k8s.io; do
if [ $cnt -gt 30 ]; then
echo >&2 "ERROR: snapshot CRDs not ready after over 1 min"
exit 1
fi
echo "$(date +%H:%M:%S)" "waiting for snapshot CRDs, attempt #$cnt"
cnt=$((cnt + 1))
sleep 2
done
}
# Install snapshot controller and associated RBAC, retrying until the pod is running.
install_snapshot_controller() {
CONTROLLER_DIR="https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}"
if [[ ${REPO_DIR} == *"external-snapshotter"* ]]; then
CONTROLLER_DIR="${REPO_DIR}"
fi
SNAPSHOT_RBAC_YAML="${CONTROLLER_DIR}/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml"
echo "kubectl apply -f ${SNAPSHOT_RBAC_YAML}"
# Ignore: Double quote to prevent globbing and word splitting.
# shellcheck disable=SC2086
kubectl apply -f ${SNAPSHOT_RBAC_YAML}
cnt=0
until kubectl get clusterrolebinding snapshot-controller-role; do
if [ $cnt -gt 30 ]; then
echo "Cluster role bindings:"
kubectl describe clusterrolebinding
echo >&2 "ERROR: snapshot controller RBAC not ready after over 5 min"
exit 1
fi
echo "$(date +%H:%M:%S)" "waiting for snapshot RBAC setup complete, attempt #$cnt"
cnt=$((cnt + 1))
sleep 10
done
SNAPSHOT_CONTROLLER_YAML="${CONTROLLER_DIR}/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml"
if [[ ${REPO_DIR} == *"external-snapshotter"* ]]; then
# snapshot-controller image built from the PR will get a "csiprow" tag.
# Load it into the "kind" cluster so that we can deploy it.
NEW_TAG="csiprow"
NEW_IMG="snapshot-controller:${NEW_TAG}"
echo "kind load docker-image --name csi-prow ${NEW_IMG}"
kind load docker-image --name csi-prow ${NEW_IMG} || die "could not load the snapshot-controller:csiprow image into the kind cluster"
# deploy snapshot-controller
echo "Deploying snapshot-controller from ${SNAPSHOT_CONTROLLER_YAML} with $NEW_IMG."
# Replace image in SNAPSHOT_CONTROLLER_YAML with snapshot-controller:csiprow and deploy
# NOTE: This logic is similar to the logic here:
# https://github.com/kubernetes-csi/csi-driver-host-path/blob/v1.4.0/deploy/util/deploy-hostpath.sh#L155
# Ignore: Double quote to prevent globbing and word splitting.
# shellcheck disable=SC2086
# Ignore: Use find instead of ls to better handle non-alphanumeric filenames.
# shellcheck disable=SC2012
for i in $(ls ${SNAPSHOT_CONTROLLER_YAML} | sort); do
echo " $i"
# Ignore: Useless cat. Consider 'cmd < file | ..' or 'cmd file | ..' instead.
# shellcheck disable=SC2002
# Ignore: See if you can use ${variable//search/replace} instead.
# shellcheck disable=SC2001
modified="$(cat "$i" | while IFS= read -r line; do
nocomments="$(echo "$line" | sed -e 's/ *#.*$//')"
if echo "$nocomments" | grep -q '^[[:space:]]*image:[[:space:]]*'; then
# Split 'image: registry.k8s.io/sig-storage/snapshot-controller:v3.0.0'
# into image (snapshot-controller:v3.0.0),
# name (snapshot-controller),
# tag (v3.0.0).
image=$(echo "$nocomments" | sed -e 's;.*image:[[:space:]]*;;')
name=$(echo "$image" | sed -e 's;.*/\([^:]*\).*;\1;')
tag=$(echo "$image" | sed -e 's;.*:;;')
# Now replace registry and/or tag
NEW_TAG="csiprow"
line="$(echo "$nocomments" | sed -e "s;$image;${name}:${NEW_TAG};")"
echo " using $line" >&2
fi
echo "$line"
done)"
if ! echo "$modified" | kubectl apply -f -; then
echo "modified version of $i:"
echo "$modified"
exit 1
fi
done
elif [ "${CSI_PROW_DRIVER_CANARY}" = "canary" ]; then
echo "Deploying snapshot-controller from ${SNAPSHOT_CONTROLLER_YAML} with canary images."
yaml="$(kubectl apply --dry-run=client -o yaml -f "$SNAPSHOT_CONTROLLER_YAML")"
# Ignore: See if you can use ${variable//search/replace} instead.
# shellcheck disable=SC2001
modified="$(echo "$yaml" | sed -e "s;image: .*/\([^/:]*\):.*;image: ${CSI_PROW_DRIVER_CANARY_REGISTRY}/\1:canary;")"
diff <(echo "$yaml") <(echo "$modified")
if ! echo "$modified" | kubectl apply -f -; then
echo "modified version of $SNAPSHOT_CONTROLLER_YAML:"
echo "$modified"
exit 1
fi
else
echo "kubectl apply -f $SNAPSHOT_CONTROLLER_YAML"
kubectl apply -f "$SNAPSHOT_CONTROLLER_YAML"
fi
cnt=0
expected_running_pods=$(kubectl apply --dry-run=client -o "jsonpath={.spec.replicas}" -f "$SNAPSHOT_CONTROLLER_YAML")
expected_namespace=$(kubectl apply --dry-run=client -o "jsonpath={.metadata.namespace}" -f "$SNAPSHOT_CONTROLLER_YAML")
expect_key='app\.kubernetes\.io/name'
expected_label=$(kubectl apply --dry-run=client -o "jsonpath={.spec.template.metadata.labels['$expect_key']}" -f "$SNAPSHOT_CONTROLLER_YAML")
if [ -z "${expected_label}" ]; then
expect_key='app'
expected_label=$(kubectl apply --dry-run=client -o "jsonpath={.spec.template.metadata.labels['$expect_key']}" -f "$SNAPSHOT_CONTROLLER_YAML")
fi
expect_key=${expect_key//\\/}
while [ "$(kubectl get pods -n "$expected_namespace" -l "$expect_key"="$expected_label" | grep 'Running' -c)" -lt "$expected_running_pods" ]; do
if [ $cnt -gt 30 ]; then
echo "snapshot-controller pod status:"
kubectl describe pods -n "$expected_namespace" -l "$expect_key"="$expected_label"
echo >&2 "ERROR: snapshot controller not ready after over 5 min"
exit 1
fi
echo "$(date +%H:%M:%S)" "waiting for snapshot controller deployment to complete, attempt #$cnt"
cnt=$((cnt + 1))
sleep 10
done
}
# collect logs and cluster status (like the version of all components, Kubernetes version, test version)
collect_cluster_info () {
cat <<EOF
=========================================================
Kubernetes:
$(kubectl version)
Driver installation in default namespace:
$(kubectl get all)
Images in cluster:
REPOSITORY TAG REVISION
$(
# Here we iterate over all images that are in use and print some information about them.
# The "revision" label is where our build process puts the version number and revision,
# which is always unique, in contrast to the tag (think "canary"...).
docker exec csi-prow-control-plane docker image ls --format='{{.Repository}} {{.Tag}} {{.ID}}' | grep -e csi -e hostpath | while read -r repo tag id; do
echo "$repo" "$tag" "$(docker exec csi-prow-control-plane docker image inspect --format='{{ index .Config.Labels "revision"}}' "$id")"
done
)
=========================================================
EOF
}
# Gets logs of all containers in all namespaces. When passed -f, kubectl will
# keep running and capture new output. Prints the pid of all background processes.
# The caller must kill (when using -f) and/or wait for them.
#
# May be called multiple times and thus appends.
start_loggers () {
kubectl get pods --all-namespaces -o go-template --template='{{range .items}}{{.metadata.namespace}} {{.metadata.name}} {{range .spec.containers}}{{.name}} {{end}}{{"\n"}}{{end}}' | while read -r namespace pod containers; do
for container in $containers; do
mkdir -p "${ARTIFACTS}/$namespace/$pod"
kubectl logs -n "$namespace" "$@" "$pod" "$container" >>"${ARTIFACTS}/$namespace/$pod/$container.log" &
echo "$!"
done
done
}
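# Illustration only (not executed): a typical caller pattern with -f is
#   loggers=$(start_loggers -f)
#   ... run tests ...
#   kill $loggers
#   wait
# so that log capture stops once testing is done.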
# Patches the image versions of test/e2e/testing-manifests/storage-csi/mock in the k/k
# source code, if needed.
patch_kubernetes () {
local source="$1" target="$2"
if [ "${CSI_PROW_DRIVER_CANARY}" = "canary" ]; then
# We cannot replace registry.k8s.io/sig-storage with gcr.io/k8s-staging-sig-storage because
# e2e.test does not support it (see test/utils/image/manifest.go). Instead we
# invoke the e2e.test binary with KUBE_TEST_REPO_LIST set to a file that
# overrides that registry.
find "$source/test/e2e/testing-manifests/storage-csi/mock" -name '*.yaml' -print0 | xargs -0 sed -i -e 's;registry.k8s.io/sig-storage/\(.*\):v.*;registry.k8s.io/sig-storage/\1:canary;'
cat >"$target/e2e-repo-list" <<EOF
sigStorageRegistry: gcr.io/k8s-staging-sig-storage
EOF
cat >&2 <<EOF
Using a modified version of k/k/test/e2e:
$(cd "$source" && git diff 2>&1)
EOF
fi
}
# Makes the E2E test suite binary available as "${CSI_PROW_WORK}/e2e.test".
install_e2e () {
if [ -e "${CSI_PROW_WORK}/e2e.test" ]; then
return
fi
if sidecar_tests_enabled; then
run_with_go "${CSI_PROW_GO_VERSION_BUILD}" go test -c -o "${CSI_PROW_WORK}/e2e-local.test" "${CSI_PROW_SIDECAR_E2E_IMPORT_PATH}"
fi
git_checkout "${CSI_PROW_E2E_REPO}" "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}" --depth=1 &&
if [ "${CSI_PROW_E2E_IMPORT_PATH}" = "k8s.io/kubernetes" ]; then
patch_kubernetes "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_WORK}" &&
go_version="${CSI_PROW_GO_VERSION_E2E:-$(go_version_for_kubernetes "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}")}" &&
run_with_go "$go_version" make WHAT=test/e2e/e2e.test "-C${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" &&
ln -s "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}/_output/bin/e2e.test" "${CSI_PROW_WORK}" &&
run_with_go "$go_version" make WHAT=vendor/github.com/onsi/ginkgo/ginkgo "-C${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" &&
ln -s "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}/_output/bin/ginkgo" "${CSI_PROW_BIN}"
else
run_with_go "${CSI_PROW_GO_VERSION_E2E}" go test -c -o "${CSI_PROW_WORK}/e2e.test" "${CSI_PROW_E2E_IMPORT_PATH}/test/e2e"
fi
}
# Makes the csi-sanity test suite binary available as
# "${CSI_PROW_WORK}/csi-sanity".
install_sanity () (
if [ -e "${CSI_PROW_WORK}/csi-sanity" ]; then
return