diff --git a/.github/workflows/atlas-image-build.yaml b/.github/workflows/atlas-image-build.yaml index ac878d5838..e1f2ed1b83 100644 --- a/.github/workflows/atlas-image-build.yaml +++ b/.github/workflows/atlas-image-build.yaml @@ -39,7 +39,7 @@ jobs: if: needs.check-files.outputs.changed == 'true' steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1 - name: Image metadata id: meta uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1 @@ -57,7 +57,7 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push - uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0 with: context: "{{defaultContext}}:docker/mongodb-atlas" push: true diff --git a/.github/workflows/build_docker.yaml b/.github/workflows/build_docker.yaml index 1619c54cfc..839d1f16ad 100644 --- a/.github/workflows/build_docker.yaml +++ b/.github/workflows/build_docker.yaml @@ -47,7 +47,7 @@ jobs: - name: Set up QEMU uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1 - name: Login to GHCR uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: @@ -66,7 +66,7 @@ jobs: ${{ inputs.extra_tags }} labels: ${{ inputs.labels }} - name: Build and push - uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0 with: context: . 
file: ${{ inputs.image_file }} diff --git a/.github/workflows/govulncheck.yaml b/.github/workflows/govulncheck.yaml index 313270ab51..5a81c0a20f 100644 --- a/.github/workflows/govulncheck.yaml +++ b/.github/workflows/govulncheck.yaml @@ -24,7 +24,7 @@ jobs: echo "go_version=$version" >> "$GITHUB_OUTPUT" - id: govulncheck name: 'Govulncheck' - uses: golang/govulncheck-action@dd0578b371c987f96d1185abb54344b44352bd58 # v1.0.3 + uses: golang/govulncheck-action@b625fbe08f3bccbe446d94fbf87fcc875a4f50ee # v1.0.4 continue-on-error: ${{ github.event_name == 'pull_request' }} with: repo-checkout: false diff --git a/.github/workflows/kanister-image-build.yaml b/.github/workflows/kanister-image-build.yaml index f7af83a801..4a6d5ed43f 100644 --- a/.github/workflows/kanister-image-build.yaml +++ b/.github/workflows/kanister-image-build.yaml @@ -54,7 +54,7 @@ jobs: - name: Set up QEMU uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1 - name: Image metadata id: meta uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1 @@ -73,7 +73,7 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push - uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0 with: context: "{{defaultContext}}:docker/build" platforms: linux/amd64,linux/arm64 diff --git a/.github/workflows/ossf-scorecard.yml b/.github/workflows/ossf-scorecard.yml index 89e8bc30a9..2c13c6b328 100644 --- a/.github/workflows/ossf-scorecard.yml +++ b/.github/workflows/ossf-scorecard.yml @@ -39,7 +39,7 @@ jobs: - # Upload the results to GitHub's code scanning dashboard. 
name: "Upload to results to dashboard" - uses: github/codeql-action/upload-sarif@461ef6c76dfe95d5c364de2f431ddbd31a417628 # v3.26.9 + uses: github/codeql-action/upload-sarif@6db8d6351fd0be61f9ed8ebd12ccd35dcec51fea # v3.26.11 with: sarif_file: results.sarif -
diff --git a/docker/couchbase-tools/Dockerfile b/docker/couchbase-tools/Dockerfile index 547d546378..1814343650 100644 --- a/docker/couchbase-tools/Dockerfile +++ b/docker/couchbase-tools/Dockerfile @@ -1,4 +1,4 @@ -FROM couchbase:enterprise-7.6.2 +FROM couchbase:enterprise-7.6.3 MAINTAINER "Tom Manville "
diff --git a/docker/kafka-adobes3Connector/image/adobeSink.Dockerfile b/docker/kafka-adobes3Connector/image/adobeSink.Dockerfile index cffc461fbe..4557b267eb 100644 --- a/docker/kafka-adobes3Connector/image/adobeSink.Dockerfile +++ b/docker/kafka-adobes3Connector/image/adobeSink.Dockerfile @@ -1,7 +1,7 @@ ARG TOOLS_IMAGE FROM ${TOOLS_IMAGE} AS TOOLS_IMAGE -FROM confluentinc/cp-kafka-connect:7.7.0 +FROM confluentinc/cp-kafka-connect:7.7.1 USER root
diff --git a/docker/kafka-adobes3Connector/image/adobeSource.Dockerfile b/docker/kafka-adobes3Connector/image/adobeSource.Dockerfile index e9a55a4276..46e7a9e0fe 100644 --- a/docker/kafka-adobes3Connector/image/adobeSource.Dockerfile +++ b/docker/kafka-adobes3Connector/image/adobeSource.Dockerfile @@ -1,4 +1,4 @@ -FROM confluentinc/cp-kafka-connect:7.7.0 +FROM confluentinc/cp-kafka-connect:7.7.1 USER root
diff --git a/docker/postgres-kanister-tools/Dockerfile b/docker/postgres-kanister-tools/Dockerfile index 7b7a4a0cac..6856256751 100644 --- a/docker/postgres-kanister-tools/Dockerfile +++ b/docker/postgres-kanister-tools/Dockerfile @@ -4,7 +4,7 @@ ARG TOOLS_IMAGE FROM ${TOOLS_IMAGE} AS TOOLS_IMAGE # Actual image base -FROM postgres:16-bullseye +FROM postgres:17-bullseye ENV DEBIAN_FRONTEND noninteractive
diff --git a/docker/postgres-kanister-tools/requirements.txt b/docker/postgres-kanister-tools/requirements.txt index 127f017647..39484b56d7 100644 --- a/docker/postgres-kanister-tools/requirements.txt +++ b/docker/postgres-kanister-tools/requirements.txt @@ -1,4 +1,4 @@ -awscli==1.34.26 +awscli==1.34.29 pip==24.2 setuptools==75.1.0 wheel==0.44.0
diff --git a/docker/postgresql/requirements.txt b/docker/postgresql/requirements.txt index 0778d34eef..e2bb3422c2 100644 --- a/docker/postgresql/requirements.txt +++ b/docker/postgresql/requirements.txt @@ -1,4 +1,4 @@ -awscli==1.34.26 +awscli==1.34.29 wal-e==1.1.1 pip==24.2 setuptools==75.1.0
diff --git a/docs/functions.rst b/docs/functions.rst index 42f92e4a02..bba4e8a4bd 100644 --- a/docs/functions.rst +++ b/docs/functions.rst @@ -150,6 +150,82 @@ Example: - | echo "Example" +MultiContainerRun +----------------- + +MultiContainerRun spins up a new pod with two containers connected +via a shared `emptyDir`_ volume. +It's similar to KubeTask, but allows using multiple images to move backup data. +The "background" container is responsible for generating data, while the "output" container +exports it to the destination. +The main difference between these containers is that phase outputs can only be generated +from the "output" container. +The function also supports an optional init container to set up the volume contents. + +..
csv-table:: + :header: "Argument", "Required", "Type", "Description" + :align: left + :widths: 5,5,5,15 + + `namespace`, No, `string`, namespace in which to execute (the pod will be created in controller's namespace if not specified) + `backgroundImage`, Yes, `string`, image to be used in "background" container + `backgroundCommand`, Yes, `[]string`, command list to execute in "background" container + `outputImage`, Yes, `string`, image to be used in "output" container + `outputCommand`, Yes, `[]string`, command list to execute in "output" container + `initImage`, No, `string`, image to be used in init container of the pod + `initCommand`, No, `[]string`, command list to execute in init container of the pod + `podOverride`, No, `map[string]interface{}`, specs to override default pod specs with + `podAnnotations`, No, `map[string]string`, custom annotations for the temporary pod that gets created + `podLabels`, No, `map[string]string`, custom labels for the temporary pod that gets created + `sharedVolumeMedium`, No, `string`, medium setting for shared volume. See `emptyDir`_. + `sharedVolumeSizeLimit`, No, `string`, sizeLimit setting for shared volume. See `emptyDir`_. + `sharedVolumeDir`, No, `string`, directory to mount shared volume. Defaults to `/tmp` + +.. _emptyDir: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir + +Example: + +.. code-block:: yaml + :linenos: + + - func: MultiContainerRun + name: examplePhase + args: + namespace: "{{ .Deployment.Namespace }}" + podOverride: + containers: + - name: export + imagePullPolicy: IfNotPresent + podAnnotations: + annKey: annValue + podLabels: + labelKey: labelValue + sharedVolumeMedium: Memory + sharedVolumeSizeLimit: 1Gi + sharedVolumeDir: /tmp/ + initImage: ubuntu + initCommand: + - bash + - -c + - | + mkfifo /tmp/pipe-file + backgroundImage: ubuntu + backgroundCommand: + - bash + - -c + - | + for i in {1..10} + do + echo $i + sleep 0.1 + done > /tmp/pipe-file + outputImage: ubuntu + outputCommand: + - bash + - -c + - | + cat /tmp/pipe-file + ScaleWorkload -------------
diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt index 143b714270..7c8b225396 100644 --- a/docs/spelling_wordlist.txt +++ b/docs/spelling_wordlist.txt @@ -63,3 +63,6 @@ webhook Kopia kopia hostname +emptyDir +sizeLimit +init
diff --git a/docs_new/functions.md b/docs_new/functions.md index b3f1adee99..d2c7232334 100644 --- a/docs_new/functions.md +++ b/docs_new/functions.md @@ -129,6 +129,72 @@ Example: echo "Example" ``` +### MultiContainerRun + +MultiContainerRun spins up a new pod with two containers connected via a shared [emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) volume. +It's similar to KubeTask, but allows using multiple images to move backup data. +The "background" container is responsible for generating data, while the "output" container +exports it to the destination. +The main difference between these containers is that phase outputs can only be generated from the +"output" container. +The function also supports an optional init container to set up the volume contents.
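+
+Phase outputs are emitted the same way as for KubeTask: the "output" container prints them
+with `kando output`. A minimal sketch, assuming the shared volume is mounted at the default
+`/tmp` and using an illustrative output key named `value`:
+
+``` sh
+# Run as (part of) the outputCommand; the controller parses this line into the phase output "value".
+kando output value "$(cat /tmp/pipe-file)"
+```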
+ + + | Argument | Required | Type | Description | + | ----------- | :------: | ----------------------- | ----------- | + | namespace | No | string | namespace in which to execute (the pod will be created in controller's namespace if not specified) | + | backgroundImage | Yes | string | image to be used in "background" container | + | backgroundCommand | Yes | []string | command list to execute in "background" container | + | outputImage | Yes | string | image to be used in "output" container | + | outputCommand | Yes | []string | command list to execute in "output" container | + | initImage | No | string | image to be used in init container of the pod | + | initCommand | No | []string | command list to execute in init container of the pod | + | podOverride | No | map[string]interface{} | specs to override default pod specs with | + | podAnnotations | No | map[string]string | custom annotations for the temporary pod that gets created | + | podLabels | No | map[string]string | custom labels for the temporary pod that gets created | + | sharedVolumeMedium | No | string | medium setting for shared volume. See [emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir). | + | sharedVolumeSizeLimit | No | string | sizeLimit setting for shared volume. See [emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir). | + | sharedVolumeDir | No | string | directory to mount shared volume, defaults to `/tmp` | + + +Example: + +``` yaml +- func: MultiContainerRun + name: examplePhase + args: + namespace: "{{ .Deployment.Namespace }}" + podOverride: + containers: + - name: export + imagePullPolicy: IfNotPresent + podAnnotations: + annKey: annValue + podLabels: + labelKey: labelValue + sharedVolumeMedium: Memory + sharedVolumeSizeLimit: 1Gi + sharedVolumeDir: /tmp/ + backgroundImage: ubuntu + backgroundCommand: + - bash + - -c + - | + mkfifo /tmp/pipe-file + for i in {1..10} + do + echo $i + sleep 0.1 + done > /tmp/pipe-file + outputImage: ubuntu + outputCommand: + - bash + - -c + - | + while [ ! -e /tmp/pipe-file ]; do sleep 1; done + cat /tmp/pipe-file +``` + ### ScaleWorkload ScaleWorkload is used to scale up or scale down a Kubernetes workload. diff --git a/pkg/function/multi_container_run.go b/pkg/function/multi_container_run.go new file mode 100644 index 0000000000..7dffab95e0 --- /dev/null +++ b/pkg/function/multi_container_run.go @@ -0,0 +1,345 @@ +// +// Copyright 2019 The Kanister Authors. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package function + +import ( + "context" + "sort" + "time" + + "github.com/kanisterio/errkit" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + + kanister "github.com/kanisterio/kanister/pkg" + crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" + "github.com/kanisterio/kanister/pkg/consts" + "github.com/kanisterio/kanister/pkg/field" + "github.com/kanisterio/kanister/pkg/kube" + "github.com/kanisterio/kanister/pkg/log" + "github.com/kanisterio/kanister/pkg/output" + "github.com/kanisterio/kanister/pkg/param" + "github.com/kanisterio/kanister/pkg/progress" + "github.com/kanisterio/kanister/pkg/utils" +) + +const ( + // MultiContainerRunFuncName gives the function name + MultiContainerRunFuncName = "MultiContainerRun" + MultiContainerRunNamespaceArg = "namespace" + MultiContainerRunBackgroundImageArg = "backgroundImage" + MultiContainerRunBackgroundCommandArg = "backgroundCommand" + MultiContainerRunOutputImageArg = "outputImage" + MultiContainerRunOutputCommandArg = "outputCommand" + MultiContainerRunVolumeMediumArg = "sharedVolumeMedium" + MultiContainerRunVolumeSizeLimitArg = "sharedVolumeSizeLimit" + MultiContainerRunSharedDirArg = "sharedVolumeDir" + MultiContainerRunPodOverrideArg = "podOverride" + MultiContainerRunInitImageArg = "initImage" + MultiContainerRunInitCommandArg = "initCommand" +) + +const ( + ktpBackgroundContainer = "background" + ktpOutputContainer = "output" + ktpSharedVolumeName = "shared" + ktpDefaultSharedDir = "/tmp/" +) + +func init() { + _ = kanister.Register(&multiContainerRunFunc{}) +} + +var _ kanister.Func = (*multiContainerRunFunc)(nil) + +type multiContainerRunFunc struct { + progressPercent string + namespace string + backgroundImage string + backgroundCommand []string + outputImage string + outputCommand []string + initImage string + initCommand []string + storageDir string + storageMedium corev1.StorageMedium + storageSizeLimit *resource.Quantity + podOverride crv1alpha1.JSONMap + labels map[string]string + annotations map[string]string +} + +func (*multiContainerRunFunc) Name() string { + return MultiContainerRunFuncName +} + +func (ktpf *multiContainerRunFunc) run( + ctx context.Context, + cli kubernetes.Interface, +) (map[string]interface{}, error) { + volumeMounts := []corev1.VolumeMount{ + { + Name: ktpSharedVolumeName, + MountPath: ktpf.storageDir, + }, + } + + var initContainers []corev1.Container + // If init image is specified + if ktpf.initImage != "" { + initContainers = []corev1.Container{ + { + Name: "init", + Image: ktpf.initImage, + Command: ktpf.initCommand, + VolumeMounts: volumeMounts, + }, + } + } + + podSpec := corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Volumes: []corev1.Volume{ + { + Name: ktpSharedVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: ktpf.storageMedium, + SizeLimit: ktpf.storageSizeLimit, + }, + }, + }, + }, + InitContainers: initContainers, + Containers: []corev1.Container{ + { + Name: ktpOutputContainer, + Image: ktpf.outputImage, + Command: ktpf.outputCommand, + VolumeMounts: volumeMounts, + }, + { + Name: ktpBackgroundContainer, + Image: ktpf.backgroundImage, + Command: ktpf.backgroundCommand, + VolumeMounts: volumeMounts, + }, + }, + } + + podSpec, err := kube.PatchDefaultPodSpecs(podSpec, ktpf.podOverride) + if err != nil { + return nil, errkit.Wrap(err, "Unable to apply podOverride", "podSpec", podSpec, "podOverride", 
ktpf.podOverride) + } + + // Put the output container first + sort.Slice(podSpec.Containers, func(i, j int) bool { + return podSpec.Containers[i].Name == ktpOutputContainer + }) + + if ktpf.labels == nil { + ktpf.labels = make(map[string]string) + } + ktpf.labels[consts.LabelKeyCreatedBy] = consts.LabelValueKanister + + if ktpf.annotations == nil { + ktpf.annotations = make(map[string]string) + } + // FIXME: this doesn't work with pod controller currently so we have to reorder containers + ktpf.annotations[defaultContainerAnn] = ktpOutputContainer + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: jobPrefix, + Namespace: ktpf.namespace, + Labels: ktpf.labels, + Annotations: ktpf.annotations, + }, + Spec: podSpec, + } + + pod, err = cli.CoreV1().Pods(ktpf.namespace).Create(ctx, pod, metav1.CreateOptions{}) + if err != nil { + return nil, errkit.Wrap(err, "Failed to create pod") + } + pc, err := kube.NewPodControllerForExistingPod(cli, pod) + if err != nil { + return nil, err + } + + ctx = field.Context(ctx, consts.PodNameKey, pod.Name) + ctx = field.Context(ctx, consts.ContainerNameKey, pod.Spec.Containers[0].Name) + go func() { + <-ctx.Done() + err := pc.StopPod(context.Background(), kube.PodControllerInfiniteStopTime, int64(0)) + if err != nil { + log.WithError(err).Print("Failed to delete pod", field.M{"PodName": pod.Name}) + } + }() + + return getPodOutput(ctx, pc) +} + +// This function is similar to kubeTaskPodFunc +func getPodOutput(ctx context.Context, pc kube.PodController) (map[string]interface{}, error) { + if err := pc.WaitForPodReady(ctx); err != nil { + return nil, errkit.Wrap(err, "Failed while waiting for Pod to be ready", "pod", pc.PodName()) + } + ctx = field.Context(ctx, consts.LogKindKey, consts.LogKindDatapath) + // Fetch logs from the pod + r, err := pc.StreamPodLogs(ctx) + if err != nil { + return nil, errkit.Wrap(err, "Failed to fetch logs from the pod") + } + out, err := output.LogAndParse(ctx, r) + if err != nil { + return nil, err + } + // Wait for pod completion + if err := pc.WaitForPodCompletion(ctx); err != nil { + return nil, errkit.Wrap(err, "Failed while waiting for Pod to complete", "pod", pc.PodName()) + } + return out, err +} + +func (ktpf *multiContainerRunFunc) Exec(ctx context.Context, tp param.TemplateParams, args map[string]interface{}) (map[string]interface{}, error) { + // Set progress percent + ktpf.progressPercent = progress.StartedPercent + defer func() { ktpf.progressPercent = progress.CompletedPercent }() + + var storageSizeString string + var bpAnnotations, bpLabels map[string]string + var err error + if err = Arg(args, MultiContainerRunBackgroundImageArg, &ktpf.backgroundImage); err != nil { + return nil, err + } + if err = Arg(args, MultiContainerRunOutputImageArg, &ktpf.outputImage); err != nil { + return nil, err + } + if err = OptArg(args, MultiContainerRunInitImageArg, &ktpf.initImage, ""); err != nil { + return nil, err + } + if err = Arg(args, MultiContainerRunBackgroundCommandArg, &ktpf.backgroundCommand); err != nil { + return nil, err + } + if err = Arg(args, MultiContainerRunOutputCommandArg, &ktpf.outputCommand); err != nil { + return nil, err + } + if err = OptArg(args, MultiContainerRunInitCommandArg, &ktpf.initCommand, nil); err != nil { + return nil, err + } + if err = OptArg(args, MultiContainerRunNamespaceArg, &ktpf.namespace, ""); err != nil { + return nil, err + } + if err = OptArg(args, MultiContainerRunVolumeMediumArg, &ktpf.storageMedium, ""); err != nil { + return nil, err + } + if err =
OptArg(args, MultiContainerRunVolumeSizeLimitArg, &storageSizeString, ""); err != nil { + return nil, err + } + if storageSizeString != "" { + size, err := resource.ParseQuantity(storageSizeString) + if err != nil { + return nil, errkit.Wrap(err, "Failed to parse sharedStorageSize arg") + } + ktpf.storageSizeLimit = &size + } + if err = OptArg(args, MultiContainerRunSharedDirArg, &ktpf.storageDir, ktpDefaultSharedDir); err != nil { + return nil, err + } + if err = OptArg(args, PodAnnotationsArg, &bpAnnotations, nil); err != nil { + return nil, err + } + if err = OptArg(args, PodLabelsArg, &bpLabels, nil); err != nil { + return nil, err + } + + ktpf.podOverride, err = GetPodSpecOverride(tp, args, MultiContainerRunPodOverrideArg) + if err != nil { + return nil, err + } + + ktpf.labels = bpLabels + ktpf.annotations = bpAnnotations + if tp.PodAnnotations != nil { + // merge the actionset annotations with blueprint annotations + var actionSetAnn ActionSetAnnotations = tp.PodAnnotations + ktpf.annotations = actionSetAnn.MergeBPAnnotations(bpAnnotations) + } + + if tp.PodLabels != nil { + // merge the actionset labels with blueprint labels + var actionSetLabels ActionSetLabels = tp.PodLabels + ktpf.labels = actionSetLabels.MergeBPLabels(bpLabels) + } + + cli, err := kube.NewClient() + if err != nil { + return nil, errkit.Wrap(err, "Failed to create Kubernetes client") + } + return ktpf.run( + ctx, + cli, + ) +} + +func (*multiContainerRunFunc) RequiredArgs() []string { + return []string{ + MultiContainerRunBackgroundImageArg, + MultiContainerRunBackgroundCommandArg, + MultiContainerRunOutputImageArg, + MultiContainerRunOutputCommandArg, + } +} + +func (*multiContainerRunFunc) Arguments() []string { + return []string{ + MultiContainerRunNamespaceArg, + MultiContainerRunInitImageArg, + MultiContainerRunInitCommandArg, + MultiContainerRunBackgroundImageArg, + MultiContainerRunBackgroundCommandArg, + MultiContainerRunOutputImageArg, + MultiContainerRunOutputCommandArg, + MultiContainerRunVolumeMediumArg, + MultiContainerRunVolumeSizeLimitArg, + MultiContainerRunSharedDirArg, + MultiContainerRunPodOverrideArg, + PodLabelsArg, + PodAnnotationsArg, + } +} + +func (ktpf *multiContainerRunFunc) Validate(args map[string]any) error { + if err := ValidatePodLabelsAndAnnotations(ktpf.Name(), args); err != nil { + return err + } + + if err := utils.CheckSupportedArgs(ktpf.Arguments(), args); err != nil { + return err + } + + return utils.CheckRequiredArgs(ktpf.RequiredArgs(), args) +} + +func (k *multiContainerRunFunc) ExecutionProgress() (crv1alpha1.PhaseProgress, error) { + metav1Time := metav1.NewTime(time.Now()) + return crv1alpha1.PhaseProgress{ + ProgressPercent: k.progressPercent, + LastTransitionTime: &metav1Time, + }, nil +} diff --git a/pkg/function/multi_container_run_test.go b/pkg/function/multi_container_run_test.go new file mode 100644 index 0000000000..23237101c4 --- /dev/null +++ b/pkg/function/multi_container_run_test.go @@ -0,0 +1,204 @@ +// Copyright 2019 The Kanister Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package function + +import ( + "context" + "os" + "time" + + . "gopkg.in/check.v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + + kanister "github.com/kanisterio/kanister/pkg" + crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1" + "github.com/kanisterio/kanister/pkg/consts" + "github.com/kanisterio/kanister/pkg/kube" + "github.com/kanisterio/kanister/pkg/param" +) + +var _ = Suite(&MultiContainerRunSuite{}) + +type MultiContainerRunSuite struct { + cli kubernetes.Interface + namespace string +} + +func (s *MultiContainerRunSuite) SetUpSuite(c *C) { + cli, err := kube.NewClient() + c.Assert(err, IsNil) + s.cli = cli + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "kanister-multicontainerruntest-", + }, + } + cns, err := s.cli.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) + c.Assert(err, IsNil) + s.namespace = cns.Name + err = os.Setenv("POD_NAMESPACE", cns.Name) + c.Assert(err, IsNil) + err = os.Setenv("POD_SERVICE_ACCOUNT", "default") + c.Assert(err, IsNil) +} + +func (s *MultiContainerRunSuite) TearDownSuite(c *C) { + if s.namespace != "" { + _ = s.cli.CoreV1().Namespaces().Delete(context.TODO(), s.namespace, metav1.DeleteOptions{}) + } +} + +func multiContainerRunPhase(namespace string) crv1alpha1.BlueprintPhase { + return crv1alpha1.BlueprintPhase{ + Name: "testMultiContainerRun", + Func: MultiContainerRunFuncName, + Args: map[string]interface{}{ + MultiContainerRunNamespaceArg: namespace, + MultiContainerRunBackgroundImageArg: consts.LatestKanisterToolsImage, + MultiContainerRunBackgroundCommandArg: []string{ + "sh", + "-c", + "echo foo > /tmp/file", + }, + MultiContainerRunOutputImageArg: consts.LatestKanisterToolsImage, + MultiContainerRunOutputCommandArg: []string{ + "sh", + "-c", + "while [ ! 
-e /tmp/file ]; do sleep 1; done; kando output value $(cat /tmp/file)", + }, + }, + } +} + +func (s *MultiContainerRunSuite) TestMultiContainerRun(c *C) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + tp := param.TemplateParams{ + StatefulSet: &param.StatefulSetParams{ + Namespace: s.namespace, + }, + PodOverride: crv1alpha1.JSONMap{ + "containers": []map[string]interface{}{ + { + "name": "background", + "imagePullPolicy": "Always", + }, + { + "name": "output", + "imagePullPolicy": "Always", + }, + }, + }, + } + action := "test" + for _, tc := range []struct { + bp *crv1alpha1.Blueprint + outs []map[string]interface{} + }{ + { + bp: newTaskBlueprint(multiContainerRunPhase(s.namespace)), + outs: []map[string]interface{}{ + { + "value": "foo", + }, + }, + }, + } { + phases, err := kanister.GetPhases(*tc.bp, action, kanister.DefaultVersion, tp) + c.Assert(err, IsNil) + c.Assert(phases, HasLen, len(tc.outs)) + for i, p := range phases { + out, err := p.Exec(ctx, *tc.bp, action, tp) + c.Assert(err, IsNil, Commentf("Phase %s failed", p.Name())) + c.Assert(out, DeepEquals, tc.outs[i]) + } + } +} + +func multiContainerRunPhaseWithInit(namespace string) crv1alpha1.BlueprintPhase { + return crv1alpha1.BlueprintPhase{ + Name: "testMultiContainerRun", + Func: MultiContainerRunFuncName, + Args: map[string]interface{}{ + MultiContainerRunNamespaceArg: namespace, + MultiContainerRunInitImageArg: consts.LatestKanisterToolsImage, + MultiContainerRunInitCommandArg: []string{ + "sh", + "-c", + "mkfifo /tmp/file", + }, + MultiContainerRunBackgroundImageArg: consts.LatestKanisterToolsImage, + MultiContainerRunBackgroundCommandArg: []string{ + "sh", + "-c", + "if [ ! -e /tmp/file ]; then exit 1; fi; echo foo >> /tmp/file", + }, + MultiContainerRunOutputImageArg: consts.LatestKanisterToolsImage, + MultiContainerRunOutputCommandArg: []string{ + "sh", + "-c", + "if [ !
-e /tmp/file ]; then exit 1; fi; kando output value $(cat /tmp/file)", + }, + }, + } +} + +func (s *MultiContainerRunSuite) TestMultiContainerRunWithInit(c *C) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + tp := param.TemplateParams{ + StatefulSet: &param.StatefulSetParams{ + Namespace: s.namespace, + }, + PodOverride: crv1alpha1.JSONMap{ + "containers": []map[string]interface{}{ + { + "name": "background", + "imagePullPolicy": "Always", + }, + { + "name": "output", + "imagePullPolicy": "Always", + }, + }, + }, + } + action := "test" + for _, tc := range []struct { + bp *crv1alpha1.Blueprint + outs []map[string]interface{} + }{ + { + bp: newTaskBlueprint(multiContainerRunPhaseWithInit(s.namespace)), + outs: []map[string]interface{}{ + { + "value": "foo", + }, + }, + }, + } { + phases, err := kanister.GetPhases(*tc.bp, action, kanister.DefaultVersion, tp) + c.Assert(err, IsNil) + c.Assert(phases, HasLen, len(tc.outs)) + for i, p := range phases { + out, err := p.Exec(ctx, *tc.bp, action, tp) + c.Assert(err, IsNil, Commentf("Phase %s failed", p.Name())) + c.Assert(out, DeepEquals, tc.outs[i]) + } + } +}
diff --git a/pkg/function/utils.go b/pkg/function/utils.go index 9d73e7caad..611bec8e7b 100644 --- a/pkg/function/utils.go +++ b/pkg/function/utils.go @@ -38,6 +38,10 @@ const ( PodAnnotationsArg = "podAnnotations" ) +const ( + defaultContainerAnn = "kubectl.kubernetes.io/default-container" +) + // ValidateCredentials verifies if the given credentials have appropriate values set func ValidateCredentials(creds *param.Credential) error { if creds == nil {
diff --git a/pkg/kube/pod.go b/pkg/kube/pod.go index 339189110a..5819d02a00 100644 --- a/pkg/kube/pod.go +++ b/pkg/kube/pod.go @@ -172,7 +172,7 @@ func GetPodObjectFromPodOptions(ctx context.Context, cli kubernetes.Interface, o } // Patch default Pod Specs if needed - patchedSpecs, err := patchDefaultPodSpecs(defaultSpecs, opts.PodOverride) + patchedSpecs, err := PatchDefaultPodSpecs(defaultSpecs, opts.PodOverride) if err != nil { return nil, errkit.Wrap(err, "Failed to create pod. Failed to override pod specs.", "namespace", opts.Namespace, "nameFmt", opts.GenerateName) } @@ -515,8 +515,8 @@ func WaitForPodCompletion(ctx context.Context, cli kubernetes.Interface, namespa return errkit.Wrap(err, errorMessage) } -// use Strategic Merge to patch default pod specs with the passed specs -func patchDefaultPodSpecs(defaultPodSpecs corev1.PodSpec, override crv1alpha1.JSONMap) (corev1.PodSpec, error) { +// PatchDefaultPodSpecs patches default pod specs with the passed override using Strategic Merge.
+func PatchDefaultPodSpecs(defaultPodSpecs corev1.PodSpec, override crv1alpha1.JSONMap) (corev1.PodSpec, error) { // Merge default specs and override specs with StrategicMergePatch mergedPatch, err := strategicMergeJSONPatch(defaultPodSpecs, override) if err != nil {
diff --git a/pkg/kube/pod_test.go b/pkg/kube/pod_test.go index 9b6959e17d..93adfecc54 100644 --- a/pkg/kube/pod_test.go +++ b/pkg/kube/pod_test.go @@ -845,7 +845,7 @@ func (s *PodSuite) TestPatchDefaultPodSpecs(c *check.C) { for _, test := range tests { override, err := CreateAndMergeJSONPatch(test.BlueprintPodSpecs, test.ActionsetPodSpecs) c.Assert(err, check.IsNil) - podSpec, err := patchDefaultPodSpecs(defaultSpecs, override) + podSpec, err := PatchDefaultPodSpecs(defaultSpecs, override) c.Assert(err, check.IsNil) c.Assert(podSpec, check.DeepEquals, test.Expected) }
diff --git a/releasenotes/notes/multi-container-run-function-d488516c0f3b22c6.yaml b/releasenotes/notes/multi-container-run-function-d488516c0f3b22c6.yaml new file mode 100644 index 0000000000..55b89385c6 --- /dev/null +++ b/releasenotes/notes/multi-container-run-function-d488516c0f3b22c6.yaml @@ -0,0 +1,2 @@ +--- +features: Introduced a new Kanister function ``MultiContainerRun`` to run pods with two containers connected by a shared volume.
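For reference, the `podOverride` handling in `MultiContainerRun` relies on the newly exported `kube.PatchDefaultPodSpecs` helper above. A minimal sketch of merging a blueprint-style override map into a default pod spec, using the same call exercised in pod_test.go (the wrapper function name and override values are illustrative):

``` go
package example

import (
	corev1 "k8s.io/api/core/v1"

	crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1"
	"github.com/kanisterio/kanister/pkg/kube"
)

// overrideOutputPullPolicy merges a podOverride-style map into defaultSpec.
// Containers are matched by name by the strategic merge patch, so only the
// "output" container's imagePullPolicy changes; the rest of the spec is kept.
func overrideOutputPullPolicy(defaultSpec corev1.PodSpec) (corev1.PodSpec, error) {
	override := crv1alpha1.JSONMap{
		"containers": []map[string]interface{}{
			{"name": "output", "imagePullPolicy": "IfNotPresent"},
		},
	}
	return kube.PatchDefaultPodSpecs(defaultSpec, override)
}
```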