release-3.5: update release-tools #401

Merged
1 change: 1 addition & 0 deletions pkg/controller/util_test.go
@@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */
+
 package controller
 
 import (
5 changes: 4 additions & 1 deletion release-tools/KUBERNETES_CSI_OWNERS_ALIASES
@@ -22,16 +22,19 @@ aliases:
   - ggriffiths
   - gnufied
   - humblec
+  - mauriciopoppe
   - j-griffith
-  - Jiawei0227
   - jingxu97
   - jsafrane
   - pohly
+  - RaunakShah
+  - sunnylovestiramisu
   - xing-yang
 
 # This documents who previously contributed to Kubernetes-CSI
 # as approver.
 emeritus_approvers:
+  - Jiawei0227
   - lpabon
   - sbezverk
   - vladimirvivien
2 changes: 2 additions & 0 deletions release-tools/SIDECAR_RELEASE_PROCESS.md
@@ -92,6 +92,8 @@ naming convention `<hostpath-deployment-version>-on-<kubernetes-version>`.
 1. Check that all [canary CI
    jobs](https://k8s-testgrid.appspot.com/sig-storage-csi-ci) are passing,
    and that test coverage is adequate for the changes that are going into the release.
+1. Check that the post-\<sidecar\>-push-images builds are succeeding.
+   [Example](https://k8s-testgrid.appspot.com/sig-storage-image-build#post-external-snapshotter-push-images)
 1. Make sure that no new PRs have merged in the meantime, and no PRs are in
    flight and soon to be merged.
 1. Create a new release following a previous release as a template. Be sure to select the correct
21 changes: 19 additions & 2 deletions release-tools/filter-junit.go
@@ -35,10 +35,18 @@ var (
 )
 
 /*
- * TestSuite represents a JUnit file. Due to how encoding/xml works, we have
+ * TestResults represents a JUnit file. Due to how encoding/xml works, we have
  * represent all fields that we want to be passed through. It's therefore
  * not a complete solution, but good enough for Ginkgo + Spyglass.
+ *
+ * Before Kubernetes 1.25 and ginkgo v2, we directly had <testsuite> in the
+ * JUnit file. Now we get <testsuites> and inside it the <testsuite>.
  */
+type TestResults struct {
+	XMLName   string    `xml:"testsuites"`
+	TestSuite TestSuite `xml:"testsuite"`
+}
+
 type TestSuite struct {
 	XMLName   string     `xml:"testsuite"`
 	TestCases []TestCase `xml:"testcase"`
@@ -48,6 +56,7 @@ type TestCase struct {
 	Name      string     `xml:"name,attr"`
 	Time      string     `xml:"time,attr"`
 	SystemOut string     `xml:"system-out,omitempty"`
+	SystemErr string     `xml:"system-err,omitempty"`
 	Failure   string     `xml:"failure,omitempty"`
 	Skipped   SkipReason `xml:"skipped,omitempty"`
 }
@@ -93,7 +102,15 @@ func main() {
 		}
 	}
 	if err := xml.Unmarshal(data, &junit); err != nil {
-		panic(err)
+		if err.Error() != "expected element type <testsuite> but have <testsuites>" {
+			panic(err)
+		}
+		// Fall back to Ginkgo v2 format.
+		var junitv2 TestResults
+		if err := xml.Unmarshal(data, &junitv2); err != nil {
+			panic(err)
+		}
+		junit.TestCases = append(junit.TestCases, junitv2.TestSuite.TestCases...)
 	}
 }
 
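
For context on the fallback above: Ginkgo v2, used by Kubernetes 1.25 and later, nests results one level deeper than Ginkgo v1, which previously made the strict <testsuite> unmarshal panic. A minimal sketch of the two input shapes, with an illustrative invocation; the file names are made up, and the -t/-o flags are the ones prow.sh passes via run_filter_junit further below:

# Ginkgo v1 produced <testsuite> at the top level:
cat > /tmp/junit_v1.xml <<'EOF'
<testsuite>
  <testcase name="External Storage volumes should mount" time="1.5"/>
</testsuite>
EOF

# Ginkgo v2 wraps the same data in <testsuites>:
cat > /tmp/junit_v2.xml <<'EOF'
<testsuites>
  <testsuite>
    <testcase name="CSI mock volume should attach" time="0.7"/>
  </testsuite>
</testsuites>
EOF

# Both inputs now land in one merged report; previously the second file
# failed with "expected element type <testsuite> but have <testsuites>".
go run release-tools/filter-junit.go -t="External.Storage|CSI.mock.volume" \
    -o /tmp/junit_merged.xml /tmp/junit_v1.xml /tmp/junit_v2.xml
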
2 changes: 1 addition & 1 deletion release-tools/go-get-kubernetes.sh
@@ -13,7 +13,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
+
 # This script can be used while converting a repo from "dep" to "go mod"
 # by calling it after "go mod init" or to update the Kubernetes packages
 # in a repo that has already been converted. Only packages that are
111 changes: 80 additions & 31 deletions release-tools/prow.sh
@@ -86,14 +86,17 @@ configvar CSI_PROW_BUILD_PLATFORMS "linux amd64 amd64; linux ppc64le ppc64le -pp
 # which is disabled with GOFLAGS=-mod=vendor).
 configvar GOFLAGS_VENDOR "$( [ -d vendor ] && echo '-mod=vendor' )" "Go flags for using the vendor directory"
 
-configvar CSI_PROW_GO_VERSION_BUILD "1.18" "Go version for building the component" # depends on component's source code
+configvar CSI_PROW_GO_VERSION_BUILD "1.19" "Go version for building the component" # depends on component's source code
 configvar CSI_PROW_GO_VERSION_E2E "" "override Go version for building the Kubernetes E2E test suite" # normally doesn't need to be set, see install_e2e
 configvar CSI_PROW_GO_VERSION_SANITY "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building the csi-sanity test suite" # depends on CSI_PROW_SANITY settings below
 configvar CSI_PROW_GO_VERSION_KIND "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building 'kind'" # depends on CSI_PROW_KIND_VERSION below
 configvar CSI_PROW_GO_VERSION_GINKGO "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building ginkgo" # depends on CSI_PROW_GINKGO_VERSION below
 
 # ginkgo test runner version to use. If the pre-installed version is
-# different, the desired version is built from source.
+# different, the desired version is built from source. For Kubernetes,
+# the version built via "make WHAT=vendor/github.com/onsi/ginkgo/ginkgo" is
+# used, which is guaranteed to match what the Kubernetes e2e.test binary
+# needs.
 configvar CSI_PROW_GINKGO_VERSION v1.7.0 "Ginkgo"
 
 # Ginkgo runs the E2E test in parallel. The default is based on the number
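
For readers new to this script: configvar, used for every setting in this file, assigns its default only when the variable is not already set, so each of the versions above can be overridden through a job's environment. A minimal sketch of that pattern, with assumed semantics rather than the real helper (which also handles quoting more defensively):

configvar () {
    # Assign the default in $2 only when $1 is still unset, then log the result.
    eval : "\${$1:=\$2}"
    eval echo "\"\$3: $1=\${$1}\""
}

# Prints "Go version for building the component: CSI_PROW_GO_VERSION_BUILD=1.19",
# unless the job already exported a value, e.g. via
#   CSI_PROW_GO_VERSION_BUILD=1.20 ./release-tools/prow.sh
configvar CSI_PROW_GO_VERSION_BUILD "1.19" "Go version for building the component"
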
@@ -118,7 +121,7 @@ configvar CSI_PROW_BUILD_JOB true "building code in repo enabled"
 # use the same settings as for "latest" Kubernetes. This works
 # as long as there are no breaking changes in Kubernetes, like
 # deprecating or changing the implementation of an alpha feature.
-configvar CSI_PROW_KUBERNETES_VERSION 1.17.0 "Kubernetes"
+configvar CSI_PROW_KUBERNETES_VERSION 1.22.0 "Kubernetes"
 
 # CSI_PROW_KUBERNETES_VERSION reduced to first two version numbers and
 # with underscore (1_13 instead of 1.13.3) and in uppercase (LATEST
@@ -138,7 +141,7 @@ kind_version_default () {
     latest|master)
         echo main;;
     *)
-        echo v0.11.1;;
+        echo v0.14.0;;
     esac
 }
 
@@ -149,16 +152,13 @@ configvar CSI_PROW_KIND_VERSION "$(kind_version_default)" "kind"
 
 # kind images to use. Must match the kind version.
 # The release notes of each kind release list the supported images.
-configvar CSI_PROW_KIND_IMAGES "kindest/node:v1.23.0@sha256:49824ab1727c04e56a21a5d8372a402fcd32ea51ac96a2706a12af38934f81ac
-kindest/node:v1.22.0@sha256:b8bda84bb3a190e6e028b1760d277454a72267a5454b57db34437c34a588d047
-kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6
-kindest/node:v1.20.7@sha256:cbeaf907fc78ac97ce7b625e4bf0de16e3ea725daf6b04f930bd14c67c671ff9
-kindest/node:v1.19.11@sha256:07db187ae84b4b7de440a73886f008cf903fcf5764ba8106a9fd5243d6f32729
-kindest/node:v1.18.19@sha256:7af1492e19b3192a79f606e43c35fb741e520d195f96399284515f077b3b622c
-kindest/node:v1.17.17@sha256:66f1d0d91a88b8a001811e2f1054af60eef3b669a9a74f9b6db871f2f1eeed00
-kindest/node:v1.16.15@sha256:83067ed51bf2a3395b24687094e283a7c7c865ccc12a8b1d7aa673ba0c5e8861
-kindest/node:v1.15.12@sha256:b920920e1eda689d9936dfcf7332701e80be12566999152626b2c9d730397a95
-kindest/node:v1.14.10@sha256:f8a66ef82822ab4f7569e91a5bccaf27bceee135c1457c512e54de8c6f7219f8" "kind images"
+configvar CSI_PROW_KIND_IMAGES "kindest/node:v1.24.0@sha256:0866296e693efe1fed79d5e6c7af8df71fc73ae45e3679af05342239cdc5bc8e
+kindest/node:v1.23.6@sha256:b1fa224cc6c7ff32455e0b1fd9cbfd3d3bc87ecaa8fcb06961ed1afb3db0f9ae
+kindest/node:v1.22.9@sha256:8135260b959dfe320206eb36b3aeda9cffcb262f4b44cda6b33f7bb73f453105
+kindest/node:v1.21.12@sha256:f316b33dd88f8196379f38feb80545ef3ed44d9197dca1bfd48bcb1583210207
+kindest/node:v1.20.15@sha256:6f2d011dffe182bad80b85f6c00e8ca9d86b5b8922cdf433d53575c4c5212248
+kindest/node:v1.19.16@sha256:d9c819e8668de8d5030708e484a9fdff44d95ec4675d136ef0a0a584e587f65c
+kindest/node:v1.18.20@sha256:738cdc23ed4be6cc0b7ea277a2ebcc454c8373d7d8fb991a7fcdbd126188e6d7" "kind images"
 
 # By default, this script tests sidecars with the CSI hostpath driver,
 # using the install_csi_driver function. That function depends on
@@ -196,7 +196,7 @@ kindest/node:v1.14.10@sha256:f8a66ef82822ab4f7569e91a5bccaf27bceee135c1457c512e5
 # If the deployment script is called with CSI_PROW_TEST_DRIVER=<file name> as
 # environment variable, then it must write a suitable test driver configuration
 # into that file in addition to installing the driver.
-configvar CSI_PROW_DRIVER_VERSION "v1.3.0" "CSI driver version"
+configvar CSI_PROW_DRIVER_VERSION "v1.8.0" "CSI driver version"
 configvar CSI_PROW_DRIVER_REPO https://github.com/kubernetes-csi/csi-driver-host-path "CSI driver repo"
 configvar CSI_PROW_DEPLOYMENT "" "deployment"
 configvar CSI_PROW_DEPLOYMENT_SUFFIX "" "additional suffix in kubernetes-x.yy[suffix].yaml files"
@@ -228,13 +228,16 @@ configvar CSI_PROW_E2E_VERSION "$(version_to_git "${CSI_PROW_KUBERNETES_VERSION}
 configvar CSI_PROW_E2E_REPO "https://github.com/kubernetes/kubernetes" "E2E repo"
 configvar CSI_PROW_E2E_IMPORT_PATH "k8s.io/kubernetes" "E2E package"
 
+# Local path for e2e tests. Set to "none" to disable.
+configvar CSI_PROW_SIDECAR_E2E_IMPORT_PATH "none" "CSI Sidecar E2E package"
+
 # csi-sanity testing from the csi-test repo can be run against the installed
 # CSI driver. For this to work, deploying the driver must expose the Unix domain
 # csi.sock as a TCP service for use by the csi-sanity command, which runs outside
 # of the cluster. The alternative would have been to (cross-)compile csi-sanity
 # and install it inside the cluster, which is not necessarily easier.
 configvar CSI_PROW_SANITY_REPO https://github.com/kubernetes-csi/csi-test "csi-test repo"
-configvar CSI_PROW_SANITY_VERSION v4.3.0 "csi-test version"
+configvar CSI_PROW_SANITY_VERSION v5.0.0 "csi-test version"
 configvar CSI_PROW_SANITY_PACKAGE_PATH github.com/kubernetes-csi/csi-test "csi-test package"
 configvar CSI_PROW_SANITY_SERVICE "hostpath-service" "Kubernetes TCP service name that exposes csi.sock"
 configvar CSI_PROW_SANITY_POD "csi-hostpathplugin-0" "Kubernetes pod with CSI driver"
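
As a usage sketch: a sidecar repository opts in to these local E2E tests by pointing the new variable at its own test package before running prow.sh; the repo and package path below are hypothetical, and the matching CSI_PROW_SIDECAR_E2E_FOCUS/SKIP knobs appear further down in this diff:

# Hypothetical override in a sidecar repo's Prow wrapper script:
# build and run the repo's own test/e2e suite against the kind cluster.
export CSI_PROW_SIDECAR_E2E_IMPORT_PATH="github.com/kubernetes-csi/external-resizer/test/e2e"
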
@@ -282,13 +285,18 @@ tests_enabled () {
 sanity_enabled () {
     [ "${CSI_PROW_TESTS_SANITY}" = "sanity" ] && tests_enabled "sanity"
 }
+
+sidecar_tests_enabled () {
+    [ "${CSI_PROW_SIDECAR_E2E_IMPORT_PATH}" != "none" ]
+}
+
 tests_need_kind () {
     tests_enabled "parallel" "serial" "serial-alpha" "parallel-alpha" ||
-        sanity_enabled
+        sanity_enabled || sidecar_tests_enabled
 }
 tests_need_non_alpha_cluster () {
     tests_enabled "parallel" "serial" ||
-        sanity_enabled
+        sanity_enabled || sidecar_tests_enabled
 }
 tests_need_alpha_cluster () {
     tests_enabled "parallel-alpha" "serial-alpha"
@@ -346,15 +354,23 @@ configvar CSI_PROW_E2E_ALPHA "$(get_versioned_variable CSI_PROW_E2E_ALPHA "${csi
 # kubernetes-csi components must be updated, either by disabling
 # the failing test for "latest" or by updating the test and not running
 # it anymore for older releases.
-configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'GenericEphemeralVolume=true,CSIStorageCapacity=true' "alpha feature gates for latest Kubernetes"
+configvar CSI_PROW_E2E_ALPHA_GATES_LATEST '' "alpha feature gates for latest Kubernetes"
 configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates"
 
+configvar CSI_PROW_E2E_GATES_LATEST '' "non alpha feature gates for latest Kubernetes"
+configvar CSI_PROW_E2E_GATES "$(get_versioned_variable CSI_PROW_E2E_GATES "${csi_prow_kubernetes_version_suffix}")" "non alpha E2E feature gates"
+
+# Focus for local tests run in the sidecar E2E repo. Only used if CSI_PROW_SIDECAR_E2E_IMPORT_PATH
+# is not set to "none". If empty, all tests in the sidecar repo will be run.
+configvar CSI_PROW_SIDECAR_E2E_FOCUS '' "tags for local E2E tests"
+configvar CSI_PROW_SIDECAR_E2E_SKIP '' "local tests that need to be skipped"
+
 # Which external-snapshotter tag to use for the snapshotter CRD and snapshot-controller deployment
 default_csi_snapshotter_version () {
     if [ "${CSI_PROW_KUBERNETES_VERSION}" = "latest" ] || [ "${CSI_PROW_DRIVER_CANARY}" = "canary" ]; then
         echo "master"
     else
-        echo "v3.0.2"
+        echo "v4.0.0"
     fi
 }
 configvar CSI_SNAPSHOTTER_VERSION "$(default_csi_snapshotter_version)" "external-snapshotter version tag"
@@ -365,7 +381,7 @@ configvar CSI_SNAPSHOTTER_VERSION "$(default_csi_snapshotter_version)" "external
 # whether they can run with the current cluster provider, but until
 # they are, we filter them out by name. Like the other test selection
 # variables, this is again a space separated list of regular expressions.
-configvar CSI_PROW_E2E_SKIP 'Disruptive' "tests that need to be skipped"
+configvar CSI_PROW_E2E_SKIP '\[Disruptive\]|\[Feature:SELinux\]' "tests that need to be skipped"
 
 # This creates directories that are required for testing.
 ensure_paths () {
@@ -437,6 +453,10 @@ install_kind () {
 
 # Ensure that we have the desired version of the ginkgo test runner.
 install_ginkgo () {
+    if [ -e "${CSI_PROW_BIN}/ginkgo" ]; then
+        return
+    fi
+
     # CSI_PROW_GINKGO_VERSION contains the tag with v prefix, the command line output does not.
     if [ "v$(ginkgo version 2>/dev/null | sed -e 's/.* //')" = "${CSI_PROW_GINKGO_VERSION}" ]; then
         return
@@ -935,12 +955,17 @@ install_e2e () {
         return
     fi
 
+    if sidecar_tests_enabled; then
+        run_with_go "${CSI_PROW_GO_VERSION_BUILD}" go test -c -o "${CSI_PROW_WORK}/e2e-local.test" "${CSI_PROW_SIDECAR_E2E_IMPORT_PATH}"
+    fi
     git_checkout "${CSI_PROW_E2E_REPO}" "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}" --depth=1 &&
     if [ "${CSI_PROW_E2E_IMPORT_PATH}" = "k8s.io/kubernetes" ]; then
         patch_kubernetes "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_WORK}" &&
         go_version="${CSI_PROW_GO_VERSION_E2E:-$(go_version_for_kubernetes "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}")}" &&
         run_with_go "$go_version" make WHAT=test/e2e/e2e.test "-C${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" &&
-        ln -s "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}/_output/bin/e2e.test" "${CSI_PROW_WORK}"
+        ln -s "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}/_output/bin/e2e.test" "${CSI_PROW_WORK}" &&
+        run_with_go "$go_version" make WHAT=vendor/github.com/onsi/ginkgo/ginkgo "-C${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" &&
+        ln -s "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}/_output/bin/ginkgo" "${CSI_PROW_BIN}"
     else
         run_with_go "${CSI_PROW_GO_VERSION_E2E}" go test -c -o "${CSI_PROW_WORK}/e2e.test" "${CSI_PROW_E2E_IMPORT_PATH}/test/e2e"
     fi
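
The two ginkgo-related changes in this file appear designed to work together: install_e2e above links the ginkgo binary that Kubernetes builds from its own vendor tree into ${CSI_PROW_BIN}, and the new early return in install_ginkgo then leaves that binary alone instead of replacing it with a separately built, potentially mismatched one. A sketch of the resulting decision, illustrative rather than part of the patch:

# If install_e2e already provided ginkgo, it was built from the same tree
# as e2e.test and therefore understands the same CLI flags; only build our
# own copy when no such binary exists yet.
if [ -e "${CSI_PROW_BIN}/ginkgo" ]; then
    echo "reusing $("${CSI_PROW_BIN}/ginkgo" version) from the Kubernetes build"
else
    echo "no ginkgo installed yet, building ${CSI_PROW_GINKGO_VERSION} from source"
fi
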
@@ -983,13 +1008,21 @@ run_e2e () (
     # the full Kubernetes E2E testsuite while only running a few tests.
     move_junit () {
         if ls "${ARTIFACTS}"/junit_[0-9]*.xml 2>/dev/null >/dev/null; then
-            run_filter_junit -t="External.Storage|CSI.mock.volume" -o "${ARTIFACTS}/junit_${name}.xml" "${ARTIFACTS}"/junit_[0-9]*.xml && rm -f "${ARTIFACTS}"/junit_[0-9]*.xml
+            mkdir -p "${ARTIFACTS}/junit/${name}" &&
+            mkdir -p "${ARTIFACTS}/junit/steps" &&
+            run_filter_junit -t="External.Storage|CSI.mock.volume" -o "${ARTIFACTS}/junit/steps/junit_${name}.xml" "${ARTIFACTS}"/junit_[0-9]*.xml &&
+            mv "${ARTIFACTS}"/junit_[0-9]*.xml "${ARTIFACTS}/junit/${name}/"
         fi
     }
     trap move_junit EXIT
 
-    cd "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" &&
-    run_with_loggers env KUBECONFIG="$KUBECONFIG" KUBE_TEST_REPO_LIST="$(if [ -e "${CSI_PROW_WORK}/e2e-repo-list" ]; then echo "${CSI_PROW_WORK}/e2e-repo-list"; fi)" ginkgo -v "$@" "${CSI_PROW_WORK}/e2e.test" -- -report-dir "${ARTIFACTS}" -storage.testdriver="${CSI_PROW_WORK}/test-driver.yaml"
+    if [ "${name}" == "local" ]; then
+        cd "${GOPATH}/src/${CSI_PROW_SIDECAR_E2E_IMPORT_PATH}" &&
+        run_with_loggers env KUBECONFIG="$KUBECONFIG" KUBE_TEST_REPO_LIST="$(if [ -e "${CSI_PROW_WORK}/e2e-repo-list" ]; then echo "${CSI_PROW_WORK}/e2e-repo-list"; fi)" ginkgo -v "$@" "${CSI_PROW_WORK}/e2e-local.test" -- -report-dir "${ARTIFACTS}" -report-prefix local
+    else
+        cd "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" &&
+        run_with_loggers env KUBECONFIG="$KUBECONFIG" KUBE_TEST_REPO_LIST="$(if [ -e "${CSI_PROW_WORK}/e2e-repo-list" ]; then echo "${CSI_PROW_WORK}/e2e-repo-list"; fi)" ginkgo -v "$@" "${CSI_PROW_WORK}/e2e.test" -- -report-dir "${ARTIFACTS}" -storage.testdriver="${CSI_PROW_WORK}/test-driver.yaml"
+    fi
 )
 
 # Run csi-sanity against installed CSI driver.
@@ -1055,13 +1088,14 @@ kubectl exec "$pod" -c "${CSI_PROW_SANITY_CONTAINER}" -- /bin/sh -c "\${CHECK_PA
 EOF
 
     chmod u+x "${CSI_PROW_WORK}"/*dir_in_pod.sh
+    mkdir -p "${ARTIFACTS}/junit/steps"
 
     # This cannot run in parallel, because -csi.junitfile output
     # from different Ginkgo nodes would go to the same file. Also the
     # staging and target directories are the same.
     run_with_loggers "${CSI_PROW_WORK}/csi-sanity" \
         -ginkgo.v \
-        -csi.junitfile "${ARTIFACTS}/junit_sanity.xml" \
+        -csi.junitfile "${ARTIFACTS}/junit/steps/junit_sanity.xml" \
         -csi.endpoint "dns:///$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' csi-prow-control-plane):$(kubectl get "services/${CSI_PROW_SANITY_SERVICE}" -o "jsonpath={..nodePort}")" \
         -csi.stagingdir "/tmp/staging" \
         -csi.mountdir "/tmp/mount" \
@@ -1091,7 +1125,8 @@ make_test_to_junit () {
     # Plain make-test.xml was not delivered as text/xml by the web
     # server and ignored by spyglass. It seems that the name has to
     # match junit*.xml.
-    out="${ARTIFACTS}/junit_make_test.xml"
+    out="${ARTIFACTS}/junit/steps/junit_make_test.xml"
+    mkdir -p "$(dirname "$out")"
     testname=
     echo "<testsuite>" >>"$out"
 
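
Taken together, the junit changes in move_junit, run_sanity, and make_test_to_junit reorganize the test artifacts: the filtered per-step summaries that Spyglass reads now live under junit/steps/, and the raw per-test files are kept in a per-step directory instead of being deleted. The resulting layout looks roughly like this (a sketch; the step names depend on which tests actually ran):

${ARTIFACTS}/
├── junit/
│   ├── parallel/            # raw junit_[0-9]*.xml moved here by move_junit
│   │   └── junit_01.xml
│   └── steps/               # filtered per-step reports
│       ├── junit_parallel.xml
│       ├── junit_sanity.xml
│       └── junit_make_test.xml
└── junit_final.xml          # merged from junit/steps/ at the end of main()
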
@@ -1254,7 +1289,8 @@ main () {
     fi
 
     if tests_need_non_alpha_cluster; then
-        start_cluster || die "starting the non-alpha cluster failed"
+        # Need to (re)create the cluster.
+        start_cluster "${CSI_PROW_E2E_GATES}" || die "starting the non-alpha cluster failed"
 
         # Install necessary snapshot CRDs and snapshot controller
         install_snapshot_crds
@@ -1300,11 +1336,24 @@ main () {
                     ret=1
                 fi
             fi
+
+            if sidecar_tests_enabled; then
+                if ! run_e2e local \
+                     -focus="${CSI_PROW_SIDECAR_E2E_FOCUS}" \
+                     -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}")"; then
+                    warn "E2E sidecar failed"
+                    ret=1
+                fi
+            fi
         fi
         delete_cluster_inside_prow_job non-alpha
     fi
 
-    if tests_need_alpha_cluster && [ "${CSI_PROW_E2E_ALPHA_GATES}" ]; then
+    # If the cluster for alpha tests doesn't need any feature gates, then we
+    # could reuse the same cluster as for the other tests. But that would make
+    # the flow in this script harder and wouldn't help in practice because
+    # we have separate Prow jobs for alpha and non-alpha tests.
+    if tests_need_alpha_cluster; then
         # Need to (re)create the cluster.
         start_cluster "${CSI_PROW_E2E_ALPHA_GATES}" || die "starting alpha cluster failed"
 
@@ -1341,8 +1390,8 @@ main () {
     fi
 
     # Merge all junit files into one. This gets rid of duplicated "skipped" tests.
-    if ls "${ARTIFACTS}"/junit_*.xml 2>/dev/null >&2; then
-        run_filter_junit -o "${CSI_PROW_WORK}/junit_final.xml" "${ARTIFACTS}"/junit_*.xml && rm "${ARTIFACTS}"/junit_*.xml && mv "${CSI_PROW_WORK}/junit_final.xml" "${ARTIFACTS}"
+    if ls "${ARTIFACTS}"/junit/steps/junit_*.xml 2>/dev/null >&2; then
+        run_filter_junit -o "${ARTIFACTS}/junit_final.xml" "${ARTIFACTS}"/junit/steps/junit_*.xml
     fi
 
     return "$ret"