From fbaaf4906c4ce3884c72a61c97cbde9da60f00ad Mon Sep 17 00:00:00 2001 From: Tariq Ibrahim Date: Thu, 14 Mar 2024 08:32:58 -0700 Subject: [PATCH 001/293] bump vgpu-dm, k8s-dm and kubevirt-dp versions Signed-off-by: Tariq Ibrahim --- ...gpu-operator-certified.clusterserviceversion.yaml | 12 ++++++------ deployments/gpu-operator/values.yaml | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml index f99de06f5..193f77cd8 100644 --- a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml +++ b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml @@ -222,13 +222,13 @@ spec: - name: gpu-operator-validator-image image: registry.gitlab.com/nvidia/kubernetes/gpu-operator/staging/gpu-operator-validator:master-latest-ubi8 - name: k8s-driver-manager-image - image: nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:27c44f4720a4abf780217bd5e7903e4a008ebdbcf71238c4f106a0c22654776c + image: nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:e349905fb91d8ebb3c6487e739ef004a55fd8ec7ba5c5c910b25c5a700f91f53 - name: vfio-manager-image image: nvcr.io/nvidia/cuda@sha256:714547d54e5fe4191019a1e5f1daffc7fab7481b619b79c378541dafc76c9e5d - name: sandbox-device-plugin-image - image: nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:387021553dfb16aab633228d42f63f04fa932b4f46add07527f296dfe97e5148 + image: nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:3dca406c56cb567f94cf9a7582b2dbdd67bdd7c3c4475fe61f5d886d834929f3 - name: vgpu-device-manager-image - image: nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:925f4a47710e4318ed457930f5406174c1f6d28b1bf6b1bc310687fec0fde712 + image: nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:d5a7aa8bb1fe7c93e0b79acd162c5ad90463cf148c16f2d5ed8c3cbed8e2fe27 - name: gdrcopy-image image: nvcr.io/nvidia/cloud-native/gdrdrv@sha256:1ae0b923bc57f47bab046b50c50110f6914bbaffbfef704df34b3fe332db2e31 customresourcedefinitions: @@ -906,7 +906,7 @@ spec: - name: "DRIVER_IMAGE-470" value: "nvcr.io/nvidia/driver@sha256:56c79482582cdfbc58d3134e8672637c5bf05f328880f76898f526143d04c6af" - name: "DRIVER_MANAGER_IMAGE" - value: "nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:27c44f4720a4abf780217bd5e7903e4a008ebdbcf71238c4f106a0c22654776c" + value: "nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:e349905fb91d8ebb3c6487e739ef004a55fd8ec7ba5c5c910b25c5a700f91f53" - name: "MIG_MANAGER_IMAGE" value: "nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:a67d8e92861a2dce5649105c07561e4422e9fe4ba81a6525dc0d70a7ef85f9c0" - name: "CUDA_BASE_IMAGE" @@ -914,9 +914,9 @@ spec: - name: "VFIO_MANAGER_IMAGE" value: "nvcr.io/nvidia/cuda@sha256:714547d54e5fe4191019a1e5f1daffc7fab7481b619b79c378541dafc76c9e5d" - name: "SANDBOX_DEVICE_PLUGIN_IMAGE" - value: "nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:387021553dfb16aab633228d42f63f04fa932b4f46add07527f296dfe97e5148" + value: "nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:3dca406c56cb567f94cf9a7582b2dbdd67bdd7c3c4475fe61f5d886d834929f3" - name: "VGPU_DEVICE_MANAGER_IMAGE" - value: "nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:925f4a47710e4318ed457930f5406174c1f6d28b1bf6b1bc310687fec0fde712" + value: "nvcr.io/nvidia/cloud-native/vgpu-device-manager@sha256:d5a7aa8bb1fe7c93e0b79acd162c5ad90463cf148c16f2d5ed8c3cbed8e2fe27" - name: "GDRCOPY_IMAGE" value: 
"nvcr.io/nvidia/cloud-native/gdrdrv@sha256:1ae0b923bc57f47bab046b50c50110f6914bbaffbfef704df34b3fe332db2e31" terminationGracePeriodSeconds: 10 diff --git a/deployments/gpu-operator/values.yaml b/deployments/gpu-operator/values.yaml index f7c776fdf..2df3778b8 100644 --- a/deployments/gpu-operator/values.yaml +++ b/deployments/gpu-operator/values.yaml @@ -181,7 +181,7 @@ driver: repository: nvcr.io/nvidia/cloud-native # When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4 # to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0 - version: v0.6.5 + version: v0.6.7 imagePullPolicy: IfNotPresent env: - name: ENABLE_GPU_POD_EVICTION @@ -393,7 +393,7 @@ vgpuManager: repository: nvcr.io/nvidia/cloud-native # When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4 # to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0 - version: v0.6.4 + version: v0.6.7 imagePullPolicy: IfNotPresent env: - name: ENABLE_GPU_POD_EVICTION @@ -405,7 +405,7 @@ vgpuDeviceManager: enabled: true repository: nvcr.io/nvidia/cloud-native image: vgpu-device-manager - version: "v0.2.4" + version: "v0.2.5" imagePullPolicy: IfNotPresent imagePullSecrets: [] env: [] @@ -427,7 +427,7 @@ vfioManager: repository: nvcr.io/nvidia/cloud-native # When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4 # to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0 - version: v0.6.5 + version: v0.6.7 imagePullPolicy: IfNotPresent env: - name: ENABLE_GPU_POD_EVICTION @@ -463,7 +463,7 @@ sandboxDevicePlugin: enabled: true repository: nvcr.io/nvidia image: kubevirt-gpu-device-plugin - version: v1.2.4 + version: v1.2.6 imagePullPolicy: IfNotPresent imagePullSecrets: [] args: [] From 872d996e38abce5ae02c11a39b91aad2d56ae3bc Mon Sep 17 00:00:00 2001 From: Christopher Desiniotis Date: Fri, 15 Mar 2024 04:27:17 +0000 Subject: [PATCH 002/293] Allow MPS root to be configurable in the device-plugin --- api/v1/clusterpolicy_types.go | 16 ++++++++++ api/v1/zz_generated.deepcopy.go | 20 +++++++++++++ ...rator-certified.clusterserviceversion.yaml | 3 ++ .../manifests/nvidia.com_clusterpolicies.yaml | 21 ++++++++++---- .../crd/bases/nvidia.com_clusterpolicies.yaml | 21 ++++++++++---- controllers/object_controls.go | 29 +++++++++++++++++++ .../crds/nvidia.com_clusterpolicies_crd.yaml | 21 ++++++++++---- deployments/gpu-operator/values.yaml | 4 +++ 8 files changed, 117 insertions(+), 18 deletions(-) diff --git a/api/v1/clusterpolicy_types.go b/api/v1/clusterpolicy_types.go index 44d6c95f3..4b1cbed9b 100644 --- a/api/v1/clusterpolicy_types.go +++ b/api/v1/clusterpolicy_types.go @@ -734,6 +734,11 @@ type DevicePluginSpec struct { // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Configuration for the NVIDIA Device Plugin via the ConfigMap" Config *DevicePluginConfig `json:"config,omitempty"` + + // Optional: MPS related configuration for the NVIDIA Device Plugin + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="MPS related configuration for the NVIDIA Device Plugin" + MPS *MPSConfig `json:"mps,omitempty"` } // DevicePluginConfig defines ConfigMap name for NVIDIA Device Plugin config @@ -752,6 +757,17 @@ type 
DevicePluginConfig struct { Default string `json:"default,omitempty"` } +// MPSConfig defines MPS related configuration for the NVIDIA Device Plugin +type MPSConfig struct { + // Root defines the MPS root path on the host + // +kubebuilder:validation:Optional + // +kubebuilder:default=/run/nvidia/mps + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="MPS root path on the host" + // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:text" + Root string `json:"root,omitempty"` +} + // SandboxDevicePluginSpec defines the properties for the NVIDIA Sandbox Device Plugin deployment type SandboxDevicePluginSpec struct { // Enabled indicates if deployment of NVIDIA Sandbox Device Plugin through operator is enabled diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index d80b36109..4f9ec6e68 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -494,6 +494,11 @@ func (in *DevicePluginSpec) DeepCopyInto(out *DevicePluginSpec) { *out = new(DevicePluginConfig) **out = **in } + if in.MPS != nil { + in, out := &in.MPS, &out.MPS + *out = new(MPSConfig) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevicePluginSpec. @@ -1032,6 +1037,21 @@ func (in *MIGSpec) DeepCopy() *MIGSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MPSConfig) DeepCopyInto(out *MPSConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MPSConfig. +func (in *MPSConfig) DeepCopy() *MPSConfig { + if in == nil { + return nil + } + out := new(MPSConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NodeStatusExporterSpec) DeepCopyInto(out *NodeStatusExporterSpec) { *out = *in diff --git a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml index 193f77cd8..41ba53f78 100644 --- a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml +++ b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml @@ -101,6 +101,9 @@ metadata: "config": { "name": "", "default": "" + }, + "mps": { + "root": "/run/nvidia/mps" } }, "gfd": { diff --git a/bundle/manifests/nvidia.com_clusterpolicies.yaml b/bundle/manifests/nvidia.com_clusterpolicies.yaml index 16e35bf4b..bf9ed381d 100644 --- a/bundle/manifests/nvidia.com_clusterpolicies.yaml +++ b/bundle/manifests/nvidia.com_clusterpolicies.yaml @@ -538,6 +538,15 @@ spec: items: type: string type: array + mps: + description: 'Optional: MPS related configuration for the NVIDIA + Device Plugin' + properties: + root: + default: /run/nvidia/mps + description: Root defines the MPS root path on the host + type: string + type: object repository: description: NVIDIA Device Plugin image repository type: string @@ -926,9 +935,9 @@ spec: type: boolean timeoutSeconds: default: 300 - description: TimeoutSecond specifies the length of time - in seconds to wait before giving up on pod termination, - zero means infinite + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite minimum: 0 type: integer type: object @@ -944,9 +953,9 @@ spec: type: string timeoutSeconds: default: 0 - description: TimeoutSecond specifies the length of time - in seconds to wait before giving up on pod termination, - zero means infinite + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite minimum: 0 type: integer type: object diff --git a/config/crd/bases/nvidia.com_clusterpolicies.yaml b/config/crd/bases/nvidia.com_clusterpolicies.yaml index 16e35bf4b..bf9ed381d 100644 --- a/config/crd/bases/nvidia.com_clusterpolicies.yaml +++ b/config/crd/bases/nvidia.com_clusterpolicies.yaml @@ -538,6 +538,15 @@ spec: items: type: string type: array + mps: + description: 'Optional: MPS related configuration for the NVIDIA + Device Plugin' + properties: + root: + default: /run/nvidia/mps + description: Root defines the MPS root path on the host + type: string + type: object repository: description: NVIDIA Device Plugin image repository type: string @@ -926,9 +935,9 @@ spec: type: boolean timeoutSeconds: default: 300 - description: TimeoutSecond specifies the length of time - in seconds to wait before giving up on pod termination, - zero means infinite + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite minimum: 0 type: integer type: object @@ -944,9 +953,9 @@ spec: type: string timeoutSeconds: default: 0 - description: TimeoutSecond specifies the length of time - in seconds to wait before giving up on pod termination, - zero means infinite + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite minimum: 0 type: integer type: object diff --git a/controllers/object_controls.go b/controllers/object_controls.go index b66b0b630..8384c8eb0 100644 --- a/controllers/object_controls.go +++ b/controllers/object_controls.go @@ -164,6 +164,10 @@ const ( 
DefaultCCModeEnvName = "DEFAULT_CC_MODE" // OpenKernelModulesEnabledEnvName is the name of the driver-container envvar for enabling open GPU kernel module support OpenKernelModulesEnabledEnvName = "OPEN_KERNEL_MODULES_ENABLED" + // MPSRootEnvName is the name of the envvar for configuring the MPS root + MPSRootEnvName = "MPS_ROOT" + // DefaultMPSRoot is the default MPS root path on the host + DefaultMPSRoot = "/run/nvidia/mps" ) // ContainerProbe defines container probe types @@ -1281,6 +1285,19 @@ func TransformDevicePlugin(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpe } } + // update MPS volumes and set MPS_ROOT env var if a custom MPS root is configured + if config.DevicePlugin.MPS != nil && config.DevicePlugin.MPS.Root != "" && + config.DevicePlugin.MPS.Root != DefaultMPSRoot { + for i, volume := range obj.Spec.Template.Spec.Volumes { + if volume.Name == "mps-root" { + obj.Spec.Template.Spec.Volumes[i].HostPath.Path = config.DevicePlugin.MPS.Root + } else if volume.Name == "mps-shm" { + obj.Spec.Template.Spec.Volumes[i].HostPath.Path = filepath.Join(config.DevicePlugin.MPS.Root, "shm") + } + } + setContainerEnv(&(obj.Spec.Template.Spec.Containers[0]), MPSRootEnvName, config.DevicePlugin.MPS.Root) + } + return nil } @@ -1346,6 +1363,18 @@ func TransformMPSControlDaemon(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolic // update env required for MIG support applyMIGConfiguration(mainContainer, config.MIG.Strategy) + // update MPS volumes if a custom MPS root is configured + if config.DevicePlugin.MPS != nil && config.DevicePlugin.MPS.Root != "" && + config.DevicePlugin.MPS.Root != DefaultMPSRoot { + for i, volume := range obj.Spec.Template.Spec.Volumes { + if volume.Name == "mps-root" { + obj.Spec.Template.Spec.Volumes[i].HostPath.Path = config.DevicePlugin.MPS.Root + } else if volume.Name == "mps-shm" { + obj.Spec.Template.Spec.Volumes[i].HostPath.Path = filepath.Join(config.DevicePlugin.MPS.Root, "shm") + } + } + } + return nil } diff --git a/deployments/gpu-operator/crds/nvidia.com_clusterpolicies_crd.yaml b/deployments/gpu-operator/crds/nvidia.com_clusterpolicies_crd.yaml index 16e35bf4b..bf9ed381d 100644 --- a/deployments/gpu-operator/crds/nvidia.com_clusterpolicies_crd.yaml +++ b/deployments/gpu-operator/crds/nvidia.com_clusterpolicies_crd.yaml @@ -538,6 +538,15 @@ spec: items: type: string type: array + mps: + description: 'Optional: MPS related configuration for the NVIDIA + Device Plugin' + properties: + root: + default: /run/nvidia/mps + description: Root defines the MPS root path on the host + type: string + type: object repository: description: NVIDIA Device Plugin image repository type: string @@ -926,9 +935,9 @@ spec: type: boolean timeoutSeconds: default: 300 - description: TimeoutSecond specifies the length of time - in seconds to wait before giving up on pod termination, - zero means infinite + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite minimum: 0 type: integer type: object @@ -944,9 +953,9 @@ spec: type: string timeoutSeconds: default: 0 - description: TimeoutSecond specifies the length of time - in seconds to wait before giving up on pod termination, - zero means infinite + description: |- + TimeoutSecond specifies the length of time in seconds to wait before giving up on pod termination, zero means + infinite minimum: 0 type: integer type: object diff --git a/deployments/gpu-operator/values.yaml b/deployments/gpu-operator/values.yaml index 2df3778b8..7df87cfb5 100644 
--- a/deployments/gpu-operator/values.yaml +++ b/deployments/gpu-operator/values.yaml @@ -277,6 +277,10 @@ devicePlugin: default: "" # Data section for the ConfigMap to create (i.e only applies when create=true) data: {} + # MPS related configuration for the plugin + mps: + # MPS root path on the host + root: "/run/nvidia/mps" # standalone dcgm hostengine dcgm: From 2ad4e487374c9bdd14472c7df1015e249b9e0d31 Mon Sep 17 00:00:00 2001 From: Tariq Ibrahim Date: Thu, 14 Mar 2024 18:42:41 -0700 Subject: [PATCH 003/293] bump dcgm and dcgm-exporter to the latest stable releases Signed-off-by: Tariq Ibrahim --- .../gpu-operator-certified.clusterserviceversion.yaml | 8 ++++---- deployments/gpu-operator/values.yaml | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml index 41ba53f78..304b6ee9b 100644 --- a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml +++ b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml @@ -203,9 +203,9 @@ spec: - name: gpu-operator-image image: registry.gitlab.com/nvidia/kubernetes/gpu-operator/staging/gpu-operator:master-latest-ubi8 - name: dcgm-exporter-image - image: nvcr.io/nvidia/k8s/dcgm-exporter@sha256:011fb450af3fa2e8fe5d28d590e4c653631447bc23d149591ced3d89089c4f2c + image: nvcr.io/nvidia/k8s/dcgm-exporter@sha256:47041ebb3f426ffb0c9dc16fde2ff924af8d567ce4a9213d3f657df789042ce7 - name: dcgm-image - image: nvcr.io/nvidia/cloud-native/dcgm@sha256:6a05d6a1923fda756aed0dddf7ed23a83c30cf1e6c519fc39dd70c0309ec8257 + image: nvcr.io/nvidia/cloud-native/dcgm@sha256:2142a46050b43da4d265697880d778fa14b0aab0395ccbe7c83662dac4cbc437 - name: container-toolkit-image image: nvcr.io/nvidia/k8s/container-toolkit@sha256:59a3875e7a37eb370385e654184efa3a1b193c9ea352165818496b19cbe14aa4 - name: driver-image @@ -897,9 +897,9 @@ spec: - name: "CONTAINER_TOOLKIT_IMAGE" value: "nvcr.io/nvidia/k8s/container-toolkit@sha256:59a3875e7a37eb370385e654184efa3a1b193c9ea352165818496b19cbe14aa4" - name: "DCGM_IMAGE" - value: "nvcr.io/nvidia/cloud-native/dcgm@sha256:6a05d6a1923fda756aed0dddf7ed23a83c30cf1e6c519fc39dd70c0309ec8257" + value: "nvcr.io/nvidia/cloud-native/dcgm@sha256:2142a46050b43da4d265697880d778fa14b0aab0395ccbe7c83662dac4cbc437" - name: "DCGM_EXPORTER_IMAGE" - value: "nvcr.io/nvidia/k8s/dcgm-exporter@sha256:011fb450af3fa2e8fe5d28d590e4c653631447bc23d149591ced3d89089c4f2c" + value: "nvcr.io/nvidia/k8s/dcgm-exporter@sha256:47041ebb3f426ffb0c9dc16fde2ff924af8d567ce4a9213d3f657df789042ce7" - name: "DEVICE_PLUGIN_IMAGE" value: "nvcr.io/nvidia/k8s-device-plugin@sha256:2a1baf95eb414e6a451c7da2ac6c2992ff81ac95e407a4b254367f18ca9aa320" - name: "DRIVER_IMAGE" diff --git a/deployments/gpu-operator/values.yaml b/deployments/gpu-operator/values.yaml index 7df87cfb5..562286a6b 100644 --- a/deployments/gpu-operator/values.yaml +++ b/deployments/gpu-operator/values.yaml @@ -288,7 +288,7 @@ dcgm: enabled: false repository: nvcr.io/nvidia/cloud-native image: dcgm - version: 3.3.0-1-ubuntu22.04 + version: 3.3.3-1-ubuntu22.04 imagePullPolicy: IfNotPresent hostPort: 5555 args: [] @@ -299,7 +299,7 @@ dcgmExporter: enabled: true repository: nvcr.io/nvidia/k8s image: dcgm-exporter - version: 3.3.0-3.2.0-ubuntu22.04 + version: 3.3.5-3.4.0-ubuntu22.04 imagePullPolicy: IfNotPresent env: - name: DCGM_EXPORTER_LISTEN From 3215163b80a71a93a90d50ff9f596ecc077d9258 Mon Sep 17 00:00:00 2001 From: Shiva Krishna Merla Date: Sun, 17 Mar 2024 
23:06:35 +0000 Subject: [PATCH 004/293] Fix command line used to signal GFD reload on config change --- assets/gpu-feature-discovery/0500_daemonset.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/assets/gpu-feature-discovery/0500_daemonset.yaml b/assets/gpu-feature-discovery/0500_daemonset.yaml index 3bae575a7..10516e463 100644 --- a/assets/gpu-feature-discovery/0500_daemonset.yaml +++ b/assets/gpu-feature-discovery/0500_daemonset.yaml @@ -106,7 +106,7 @@ spec: - name: SIGNAL value: "1" # SIGHUP - name: PROCESS_TO_SIGNAL - value: "/usr/bin/gpu-feature-discovery" + value: "gpu-feature-discovery" volumes: - name: output-dir hostPath: From 8470e816f0841a37a39bee162bb1acf1e2151365 Mon Sep 17 00:00:00 2001 From: Tariq Ibrahim Date: Mon, 18 Mar 2024 15:22:15 -0700 Subject: [PATCH 005/293] bump k8s-device-plugin to v0.15.0-rc.2 Signed-off-by: Tariq Ibrahim --- .../gpu-operator-certified.clusterserviceversion.yaml | 8 ++++---- deployments/gpu-operator/values.yaml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml index 304b6ee9b..47f2c42a5 100644 --- a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml +++ b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml @@ -215,9 +215,9 @@ spec: - name: driver-image-470 image: nvcr.io/nvidia/driver@sha256:56c79482582cdfbc58d3134e8672637c5bf05f328880f76898f526143d04c6af - name: device-plugin-image - image: nvcr.io/nvidia/k8s-device-plugin@sha256:2a1baf95eb414e6a451c7da2ac6c2992ff81ac95e407a4b254367f18ca9aa320 + image: nvcr.io/nvidia/k8s-device-plugin@sha256:aa92e05ab0906de97ae211366d010560d2a38d9933d7532f667fa5b6f8b4abca - name: gpu-feature-discovery-image - image: nvcr.io/nvidia/k8s-device-plugin@sha256:2a1baf95eb414e6a451c7da2ac6c2992ff81ac95e407a4b254367f18ca9aa320 + image: nvcr.io/nvidia/k8s-device-plugin@sha256:aa92e05ab0906de97ae211366d010560d2a38d9933d7532f667fa5b6f8b4abca - name: mig-manager-image image: nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:a67d8e92861a2dce5649105c07561e4422e9fe4ba81a6525dc0d70a7ef85f9c0 - name: init-container-image @@ -893,7 +893,7 @@ spec: - name: "VALIDATOR_IMAGE" value: "registry.gitlab.com/nvidia/kubernetes/gpu-operator/staging/gpu-operator-validator:master-latest-ubi8" - name: "GFD_IMAGE" - value: "nvcr.io/nvidia/k8s-device-plugin@sha256:2a1baf95eb414e6a451c7da2ac6c2992ff81ac95e407a4b254367f18ca9aa320" + value: "nvcr.io/nvidia/k8s-device-plugin@sha256:aa92e05ab0906de97ae211366d010560d2a38d9933d7532f667fa5b6f8b4abca" - name: "CONTAINER_TOOLKIT_IMAGE" value: "nvcr.io/nvidia/k8s/container-toolkit@sha256:59a3875e7a37eb370385e654184efa3a1b193c9ea352165818496b19cbe14aa4" - name: "DCGM_IMAGE" @@ -901,7 +901,7 @@ spec: - name: "DCGM_EXPORTER_IMAGE" value: "nvcr.io/nvidia/k8s/dcgm-exporter@sha256:47041ebb3f426ffb0c9dc16fde2ff924af8d567ce4a9213d3f657df789042ce7" - name: "DEVICE_PLUGIN_IMAGE" - value: "nvcr.io/nvidia/k8s-device-plugin@sha256:2a1baf95eb414e6a451c7da2ac6c2992ff81ac95e407a4b254367f18ca9aa320" + value: "nvcr.io/nvidia/k8s-device-plugin@sha256:aa92e05ab0906de97ae211366d010560d2a38d9933d7532f667fa5b6f8b4abca" - name: "DRIVER_IMAGE" value: "nvcr.io/nvidia/driver@sha256:7481a3697783dcdca9ae78e7b548a6900e86ea33ab49ec14f0ba55db2fdb1a2e" - name: "DRIVER_IMAGE-535" diff --git a/deployments/gpu-operator/values.yaml b/deployments/gpu-operator/values.yaml index 562286a6b..dd03cadce 100644 --- 
a/deployments/gpu-operator/values.yaml +++ b/deployments/gpu-operator/values.yaml @@ -230,7 +230,7 @@ devicePlugin: enabled: true repository: nvcr.io/nvidia image: k8s-device-plugin - version: v0.15.0-rc.1-ubi8 + version: v0.15.0-rc.2-ubi8 imagePullPolicy: IfNotPresent imagePullSecrets: [] args: [] From 6a7b70850178af2f3889b7e5869f8cf1c98b9773 Mon Sep 17 00:00:00 2001 From: Carlos Eduardo Arango Gutierrez Date: Fri, 15 Mar 2024 16:29:12 +0100 Subject: [PATCH 006/293] Bump NFD to v0.15.3 Signed-off-by: Carlos Eduardo Arango Gutierrez --- deployments/gpu-operator/Chart.lock | 6 +- deployments/gpu-operator/Chart.yaml | 2 +- .../charts/node-feature-discovery/Chart.yaml | 4 +- .../charts/node-feature-discovery/README.md | 2 +- .../crds/nfd-api-crds.yaml | 105 ++++++++++++++---- .../templates/cert-manager-certs.yaml | 7 +- .../templates/clusterrole.yaml | 2 +- .../templates/clusterrolebinding.yaml | 2 +- .../templates/master.yaml | 28 ++--- .../templates/nfd-gc.yaml | 3 + .../templates/nfd-master-conf.yaml | 2 + .../templates/nfd-worker-conf.yaml | 2 + .../templates/role.yaml | 2 +- .../templates/rolebinding.yaml | 2 +- .../templates/service.yaml | 2 + .../templates/serviceaccount.yaml | 4 +- .../templates/worker.yaml | 12 +- .../charts/node-feature-discovery/values.yaml | 35 ++++-- 18 files changed, 157 insertions(+), 65 deletions(-) diff --git a/deployments/gpu-operator/Chart.lock b/deployments/gpu-operator/Chart.lock index f5c7fcca3..49c9dca17 100644 --- a/deployments/gpu-operator/Chart.lock +++ b/deployments/gpu-operator/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: node-feature-discovery repository: https://kubernetes-sigs.github.io/node-feature-discovery/charts - version: 0.14.2 -digest: sha256:84ec59c0c12da825ca7dc25bdac63d0f2106822a129f7fe1f9d60a4023a543ce -generated: "2023-10-10T11:26:00.823757+02:00" + version: 0.15.3 +digest: sha256:541f77527d949998bca4bc887e2283f2fb41892f8f61635d8afab03b7fa9f134 +generated: "2024-03-15T16:27:37.327877+01:00" diff --git a/deployments/gpu-operator/Chart.yaml b/deployments/gpu-operator/Chart.yaml index 08cdee92a..1fd4d83ff 100644 --- a/deployments/gpu-operator/Chart.yaml +++ b/deployments/gpu-operator/Chart.yaml @@ -19,6 +19,6 @@ keywords: dependencies: - name: node-feature-discovery - version: v0.14.2 + version: v0.15.3 repository: https://kubernetes-sigs.github.io/node-feature-discovery/charts condition: nfd.enabled diff --git a/deployments/gpu-operator/charts/node-feature-discovery/Chart.yaml b/deployments/gpu-operator/charts/node-feature-discovery/Chart.yaml index 8bd1f818d..762f8c8f1 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/Chart.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: v0.14.2 +appVersion: v0.15.3 description: 'Detects hardware features available on each node in a Kubernetes cluster, and advertises those features using node labels. ' home: https://github.com/kubernetes-sigs/node-feature-discovery @@ -11,4 +11,4 @@ name: node-feature-discovery sources: - https://github.com/kubernetes-sigs/node-feature-discovery type: application -version: 0.14.2 +version: 0.15.3 diff --git a/deployments/gpu-operator/charts/node-feature-discovery/README.md b/deployments/gpu-operator/charts/node-feature-discovery/README.md index 16b5254d5..b8b7d90ca 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/README.md +++ b/deployments/gpu-operator/charts/node-feature-discovery/README.md @@ -6,5 +6,5 @@ labels. 
NFD provides flexible configuration and extension points for a wide range of vendor and application specific node labeling needs. See -[NFD documentation](https://kubernetes-sigs.github.io/node-feature-discovery/v0.14/deployment/helm.html) +[NFD documentation](https://kubernetes-sigs.github.io/node-feature-discovery/v0.15/deployment/helm.html) for deployment instructions. diff --git a/deployments/gpu-operator/charts/node-feature-discovery/crds/nfd-api-crds.yaml b/deployments/gpu-operator/charts/node-feature-discovery/crds/nfd-api-crds.yaml index 6866c7ffe..4e6304163 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/crds/nfd-api-crds.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/crds/nfd-api-crds.yaml @@ -153,6 +153,11 @@ spec: description: Rule defines a rule for node customization such as labeling. properties: + annotations: + additionalProperties: + type: string + description: Annotations to create if the rule matches. + type: object extendedResources: additionalProperties: type: string @@ -185,19 +190,16 @@ spec: in the feature set. properties: feature: + description: Feature is the name of the feature + set to match against. type: string matchExpressions: additionalProperties: - description: "MatchExpression specifies an expression + description: MatchExpression specifies an expression to evaluate against a set of input values. It contains an operator that is applied when matching the input and an array of values that the operator - evaluates the input against. \n NB: CreateMatchExpression - or MustCreateMatchExpression() should be used - for creating new instances. \n NB: Validate() - must be called if Op or Value fields are modified - or if a new instance is created from scratch - without using the helper functions." + evaluates the input against. properties: op: description: Op is the operator to be applied. @@ -229,13 +231,46 @@ spec: required: - op type: object - description: MatchExpressionSet contains a set of - MatchExpressions, each of which is evaluated against - a set of input values. + description: MatchExpressions is the set of per-element + expressions evaluated. These match against the + value of the specified elements. + type: object + matchName: + description: MatchName in an expression that is + matched against the name of each element in the + feature set. + properties: + op: + description: Op is the operator to be applied. + enum: + - In + - NotIn + - InRegexp + - Exists + - DoesNotExist + - Gt + - Lt + - GtLt + - IsTrue + - IsFalse + type: string + value: + description: Value is the list of values that + the operand evaluates the input against. Value + should be empty if the operator is Exists, + DoesNotExist, IsTrue or IsFalse. Value should + contain exactly one element if the operator + is Gt or Lt and exactly two elements if the + operator is GtLt. In other cases Value should + contain at least one element. + items: + type: string + type: array + required: + - op type: object required: - feature - - matchExpressions type: object type: array required: @@ -251,18 +286,16 @@ spec: are evaluated against each element in the feature set. properties: feature: + description: Feature is the name of the feature set to + match against. type: string matchExpressions: additionalProperties: - description: "MatchExpression specifies an expression + description: MatchExpression specifies an expression to evaluate against a set of input values. 
It contains an operator that is applied when matching the input and an array of values that the operator evaluates - the input against. \n NB: CreateMatchExpression or - MustCreateMatchExpression() should be used for creating - new instances. \n NB: Validate() must be called if - Op or Value fields are modified or if a new instance - is created from scratch without using the helper functions." + the input against. properties: op: description: Op is the operator to be applied. @@ -292,12 +325,44 @@ spec: required: - op type: object - description: MatchExpressionSet contains a set of MatchExpressions, - each of which is evaluated against a set of input values. + description: MatchExpressions is the set of per-element + expressions evaluated. These match against the value + of the specified elements. + type: object + matchName: + description: MatchName in an expression that is matched + against the name of each element in the feature set. + properties: + op: + description: Op is the operator to be applied. + enum: + - In + - NotIn + - InRegexp + - Exists + - DoesNotExist + - Gt + - Lt + - GtLt + - IsTrue + - IsFalse + type: string + value: + description: Value is the list of values that the + operand evaluates the input against. Value should + be empty if the operator is Exists, DoesNotExist, + IsTrue or IsFalse. Value should contain exactly + one element if the operator is Gt or Lt and exactly + two elements if the operator is GtLt. In other cases + Value should contain at least one element. + items: + type: string + type: array + required: + - op type: object required: - feature - - matchExpressions type: object type: array name: diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/cert-manager-certs.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/cert-manager-certs.yaml index ac2e51fc1..8af115316 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/cert-manager-certs.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/cert-manager-certs.yaml @@ -1,4 +1,5 @@ {{- if .Values.tls.certManager }} +{{- if .Values.master.enable }} --- apiVersion: cert-manager.io/v1 kind: Certificate @@ -17,14 +18,13 @@ spec: # first one is configured for use by the worker; below are for completeness - {{ include "node-feature-discovery.fullname" . }}-master.{{ include "node-feature-discovery.namespace" . }}.svc - {{ include "node-feature-discovery.fullname" . }}-master.{{ include "node-feature-discovery.namespace" . 
}}.svc.cluster.local - # localhost needed for grpc_health_probe - - localhost issuerRef: name: nfd-ca-issuer kind: Issuer group: cert-manager.io - +{{- end }} --- +{{- if .Values.worker.enable }} apiVersion: cert-manager.io/v1 kind: Certificate metadata: @@ -42,6 +42,7 @@ spec: name: nfd-ca-issuer kind: Issuer group: cert-manager.io +{{- end }} {{- if .Values.topologyUpdater.enable }} --- diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/clusterrole.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/clusterrole.yaml index d4329338b..e652e1df8 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/clusterrole.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/clusterrole.yaml @@ -1,4 +1,4 @@ -{{- if .Values.master.rbac.create }} +{{- if and .Values.master.enable .Values.master.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/clusterrolebinding.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/clusterrolebinding.yaml index 87b3003e2..99134a1c5 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/clusterrolebinding.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/clusterrolebinding.yaml @@ -1,4 +1,4 @@ -{{- if .Values.master.rbac.create }} +{{- if and .Values.master.enable .Values.master.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/master.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/master.yaml index e77ca136c..53a291e0f 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/master.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/master.yaml @@ -1,3 +1,4 @@ +{{- if .Values.master.enable }} apiVersion: apps/v1 kind: Deployment metadata: @@ -41,29 +42,13 @@ spec: image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} livenessProbe: - exec: - command: - - "/usr/bin/grpc_health_probe" - - "-addr=:{{ .Values.master.port | default "8080" }}" - {{- if .Values.tls.enable }} - - "-tls" - - "-tls-ca-cert=/etc/kubernetes/node-feature-discovery/certs/ca.crt" - - "-tls-client-key=/etc/kubernetes/node-feature-discovery/certs/tls.key" - - "-tls-client-cert=/etc/kubernetes/node-feature-discovery/certs/tls.crt" - {{- end }} + grpc: + port: 8080 initialDelaySeconds: 10 periodSeconds: 10 readinessProbe: - exec: - command: - - "/usr/bin/grpc_health_probe" - - "-addr=:{{ .Values.master.port | default "8080" }}" - {{- if .Values.tls.enable }} - - "-tls" - - "-tls-ca-cert=/etc/kubernetes/node-feature-discovery/certs/ca.crt" - - "-tls-client-key=/etc/kubernetes/node-feature-discovery/certs/tls.key" - - "-tls-client-cert=/etc/kubernetes/node-feature-discovery/certs/tls.crt" - {{- end }} + grpc: + port: 8080 initialDelaySeconds: 5 periodSeconds: 10 failureThreshold: 10 @@ -85,8 +70,8 @@ spec: {{- if .Values.master.instance | empty | not }} - "-instance={{ .Values.master.instance }}" {{- end }} - - "-port={{ .Values.master.port | default "8080" }}" {{- if not .Values.enableNodeFeatureApi }} + - "-port={{ .Values.master.port | default "8080" }}" - "-enable-nodefeature-api=false" {{- else if gt (int .Values.master.replicaCount) 1 }} - "-enable-leader-election" 
@@ -157,3 +142,4 @@ spec: tolerations: {{- toYaml . | nindent 8 }} {{- end }} +{{- end }} diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-gc.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-gc.yaml index d803eef40..1e0e12327 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-gc.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-gc.yaml @@ -58,6 +58,9 @@ spec: drop: [ "ALL" ] readOnlyRootFilesystem: true runAsNonRoot: true + ports: + - name: metrics + containerPort: {{ .Values.gc.metricsPort | default "8081"}} {{- with .Values.gc.nodeSelector }} nodeSelector: diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-master-conf.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-master-conf.yaml index c806a8e5d..9c6e01cde 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-master-conf.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-master-conf.yaml @@ -1,3 +1,4 @@ +{{- if .Values.master.enable }} apiVersion: v1 kind: ConfigMap metadata: @@ -8,3 +9,4 @@ metadata: data: nfd-master.conf: |- {{- .Values.master.config | toYaml | nindent 4 }} +{{- end }} diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-worker-conf.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-worker-conf.yaml index 61d2a481a..a2299dea1 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-worker-conf.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/nfd-worker-conf.yaml @@ -1,3 +1,4 @@ +{{- if .Values.worker.enable }} apiVersion: v1 kind: ConfigMap metadata: @@ -8,3 +9,4 @@ metadata: data: nfd-worker.conf: |- {{- .Values.worker.config | toYaml | nindent 4 }} +{{- end }} diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/role.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/role.yaml index c71ede442..3a872e572 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/role.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/role.yaml @@ -1,4 +1,4 @@ -{{- if .Values.worker.rbac.create }} +{{- if and .Values.worker.enable .Values.worker.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/rolebinding.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/rolebinding.yaml index d8025be9b..a640d5f8b 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/rolebinding.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/rolebinding.yaml @@ -1,4 +1,4 @@ -{{- if .Values.worker.rbac.create }} +{{- if and .Values.worker.enable .Values.worker.rbac.create }} apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/service.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/service.yaml index 0d4789818..d71d1555f 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/service.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/service.yaml @@ -1,3 +1,4 @@ +{{- if and (not .Values.enableNodeFeatureApi) .Values.master.enable }} apiVersion: v1 kind: Service metadata: @@ -16,3 +17,4 @@ spec: selector: 
{{- include "node-feature-discovery.selectorLabels" . | nindent 4 }} role: master +{{- end}} diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/serviceaccount.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/serviceaccount.yaml index 34dc8b753..7da2c877e 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/serviceaccount.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/serviceaccount.yaml @@ -1,4 +1,4 @@ -{{- if .Values.master.serviceAccount.create -}} +{{- if and .Values.master.enable .Values.master.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount metadata: @@ -42,7 +42,7 @@ metadata: {{- end }} {{- end }} -{{- if .Values.worker.serviceAccount.create }} +{{- if and .Values.worker.enable .Values.worker.serviceAccount.create }} --- apiVersion: v1 kind: ServiceAccount diff --git a/deployments/gpu-operator/charts/node-feature-discovery/templates/worker.yaml b/deployments/gpu-operator/charts/node-feature-discovery/templates/worker.yaml index 0e56eb5d1..f49f9bd64 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/templates/worker.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/templates/worker.yaml @@ -1,3 +1,4 @@ +{{- if .Values.worker.enable }} apiVersion: apps/v1 kind: DaemonSet metadata: @@ -44,13 +45,21 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid resources: {{- toYaml .Values.worker.resources | nindent 12 }} command: - "nfd-worker" args: - - "-server={{ include "node-feature-discovery.fullname" . }}-master:{{ .Values.master.service.port }}" {{- if not .Values.enableNodeFeatureApi }} + - "-server={{ include "node-feature-discovery.fullname" . }}-master:{{ .Values.master.service.port }}" - "-enable-nodefeature-api=false" {{- end }} {{- if .Values.tls.enable }} @@ -150,3 +159,4 @@ spec: {{- with .Values.worker.priorityClassName }} priorityClassName: {{ . | quote }} {{- end }} +{{- end }} diff --git a/deployments/gpu-operator/charts/node-feature-discovery/values.yaml b/deployments/gpu-operator/charts/node-feature-discovery/values.yaml index 2291aef4f..d4919bca8 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/values.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/values.yaml @@ -13,8 +13,10 @@ namespaceOverride: "" enableNodeFeatureApi: true master: + enable: true config: ### # noPublish: false + # autoDefaultNs: true # extraLabelNs: ["added.ns.io","added.kubernets.io"] # denyLabelNs: ["denied.ns.io","denied.kubernetes.io"] # resourceLabels: ["vendor-1.com/feature-1","vendor-2.io/feature-2"] @@ -45,6 +47,8 @@ master: # nfdApiParallelism: 10 ### # The TCP port that nfd-master listens for incoming requests. Default: 8080 + # Deprecated this parameter is related to the deprecated gRPC API and will + # be removed with it in a future release port: 8080 metricsPort: 8081 instance: @@ -130,6 +134,7 @@ master: values: [""] worker: + enable: true config: ### #core: # labelWhiteList: @@ -215,7 +220,7 @@ worker: # # The following feature demonstrates the capabilities of the matchFeatures # - name: "my custom rule" # labels: - # my-ng-feature: "true" + # "vendor.io/my-ng-feature": "true" # # matchFeatures implements a logical AND over all matcher terms in the # # list (i.e. 
all of the terms, or per-feature matchers, must match) # matchFeatures: @@ -286,7 +291,7 @@ worker: # # The following feature demonstrates the capabilities of the matchAny # - name: "my matchAny rule" # labels: - # my-ng-feature-2: "my-value" + # "vendor.io/my-ng-feature-2": "my-value" # # matchAny implements a logical IF over all elements (sub-matchers) in # # the list (i.e. at least one feature matcher must match) # matchAny: @@ -307,10 +312,17 @@ worker: # vendor: {op: In, value: ["8086"]} # class: {op: In, value: ["02"]} # + # - name: "avx wildcard rule" + # labels: + # "my-avx-feature": "true" + # matchFeatures: + # - feature: cpu.cpuid + # matchName: {op: InRegexp, value: ["^AVX512"]} + # # # The following features demonstreate label templating capabilities # - name: "my template rule" # labelsTemplate: | - # {{ range .system.osrelease }}my-system-feature.{{ .Name }}={{ .Value }} + # {{ range .system.osrelease }}vendor.io/my-system-feature.{{ .Name }}={{ .Value }} # {{ end }} # matchFeatures: # - feature: system.osrelease @@ -320,7 +332,7 @@ worker: # # - name: "my template rule 2" # labelsTemplate: | - # {{ range .pci.device }}my-pci-device.{{ .class }}-{{ .device }}=with-cpuid + # {{ range .pci.device }}vendor.io/my-pci-device.{{ .class }}-{{ .device }}=with-cpuid # {{ end }} # matchFeatures: # - feature: pci.device @@ -335,7 +347,7 @@ worker: # # previous labels and vars # - name: "my dummy kernel rule" # labels: - # "my.kernel.feature": "true" + # "vendor.io/my.kernel.feature": "true" # matchFeatures: # - feature: kernel.version # matchExpressions: @@ -350,13 +362,20 @@ worker: # # - name: "my rule using backrefs" # labels: - # "my.backref.feature": "true" + # "vendor.io/my.backref.feature": "true" # matchFeatures: # - feature: rule.matched # matchExpressions: - # my.kernel.feature: {op: IsTrue} + # vendor.io/my.kernel.feature: {op: IsTrue} # my.dummy.var: {op: Gt, value: ["0"]} # + # - name: "kconfig template rule" + # labelsTemplate: | + # {{ range .kernel.config }}kconfig-{{ .Name }}={{ .Value }} + # {{ end }} + # matchFeatures: + # - feature: kernel.config + # matchName: {op: In, value: ["SWAP", "X86", "ARM"]} ### metricsPort: 8081 @@ -493,6 +512,8 @@ gc: # cpu: 100m # memory: 128Mi + metricsPort: 8081 + nodeSelector: {} tolerations: [] annotations: {} From e8a89aa5537eca8cf339a771aab0bf6ae3f64e49 Mon Sep 17 00:00:00 2001 From: Tariq Ibrahim Date: Mon, 18 Mar 2024 18:25:32 -0700 Subject: [PATCH 007/293] bump gpu-feature-discovery to v0.15.0-rc.2 Signed-off-by: Tariq Ibrahim --- assets/gpu-feature-discovery/0500_daemonset.yaml | 8 ++++++++ deployments/gpu-operator/values.yaml | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/assets/gpu-feature-discovery/0500_daemonset.yaml b/assets/gpu-feature-discovery/0500_daemonset.yaml index 10516e463..e7a43a18a 100644 --- a/assets/gpu-feature-discovery/0500_daemonset.yaml +++ b/assets/gpu-feature-discovery/0500_daemonset.yaml @@ -71,6 +71,14 @@ spec: value: 60s - name: GFD_FAIL_ON_INIT_ERROR value: "true" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName volumeMounts: - name: output-dir mountPath: "/etc/kubernetes/node-feature-discovery/features.d" diff --git a/deployments/gpu-operator/values.yaml b/deployments/gpu-operator/values.yaml index dd03cadce..85df8f4dd 100644 --- a/deployments/gpu-operator/values.yaml +++ b/deployments/gpu-operator/values.yaml @@ -326,7 +326,7 @@ gfd: enabled: true repository: nvcr.io/nvidia 
image: k8s-device-plugin - version: v0.15.0-rc.1-ubi8 + version: v0.15.0-rc.2-ubi8 imagePullPolicy: IfNotPresent imagePullSecrets: [] env: From 110931104885bae7b592bebd619d95dc5bb5ddff Mon Sep 17 00:00:00 2001 From: Tariq Ibrahim Date: Wed, 20 Mar 2024 15:00:32 -0700 Subject: [PATCH 008/293] bump driver versions for R550 & R535 Signed-off-by: Tariq Ibrahim --- .../gpu-operator-certified.clusterserviceversion.yaml | 10 +++++----- config/samples/nvidia_v1alpha1_nvidiadriver.yaml | 2 +- deployments/gpu-operator/values.yaml | 2 +- tests/scripts/.definitions.sh | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml index 47f2c42a5..7acf69274 100644 --- a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml +++ b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml @@ -161,7 +161,7 @@ metadata: "driverType": "gpu", "repository": "nvcr.io/nvidia", "image": "driver", - "version": "sha256:7481a3697783dcdca9ae78e7b548a6900e86ea33ab49ec14f0ba55db2fdb1a2e", + "version": "sha256:96f25c67e5b1072d5981080e12d65ec37eb9ef2fc0494499416aa801b0a34da3", "nodeSelector": {}, "manager": {}, "repoConfig": { @@ -209,9 +209,9 @@ spec: - name: container-toolkit-image image: nvcr.io/nvidia/k8s/container-toolkit@sha256:59a3875e7a37eb370385e654184efa3a1b193c9ea352165818496b19cbe14aa4 - name: driver-image - image: nvcr.io/nvidia/driver@sha256:7481a3697783dcdca9ae78e7b548a6900e86ea33ab49ec14f0ba55db2fdb1a2e + image: nvcr.io/nvidia/driver@sha256:96f25c67e5b1072d5981080e12d65ec37eb9ef2fc0494499416aa801b0a34da3 - name: driver-image-535 - image: nvcr.io/nvidia/driver@sha256:abda1ac56371d55917b96ff330109980f468e133c9d5705da0ef87429f14ccd7 + image: nvcr.io/nvidia/driver@sha256:a836ccbe21da735aee1c39b81060ed5e2fdb4ffa339874baaf4634f1e9259f74 - name: driver-image-470 image: nvcr.io/nvidia/driver@sha256:56c79482582cdfbc58d3134e8672637c5bf05f328880f76898f526143d04c6af - name: device-plugin-image @@ -903,9 +903,9 @@ spec: - name: "DEVICE_PLUGIN_IMAGE" value: "nvcr.io/nvidia/k8s-device-plugin@sha256:aa92e05ab0906de97ae211366d010560d2a38d9933d7532f667fa5b6f8b4abca" - name: "DRIVER_IMAGE" - value: "nvcr.io/nvidia/driver@sha256:7481a3697783dcdca9ae78e7b548a6900e86ea33ab49ec14f0ba55db2fdb1a2e" + value: "nvcr.io/nvidia/driver@sha256:96f25c67e5b1072d5981080e12d65ec37eb9ef2fc0494499416aa801b0a34da3" - name: "DRIVER_IMAGE-535" - value: "nvcr.io/nvidia/driver@sha256:abda1ac56371d55917b96ff330109980f468e133c9d5705da0ef87429f14ccd7" + value: "nvcr.io/nvidia/driver@sha256:a836ccbe21da735aee1c39b81060ed5e2fdb4ffa339874baaf4634f1e9259f74" - name: "DRIVER_IMAGE-470" value: "nvcr.io/nvidia/driver@sha256:56c79482582cdfbc58d3134e8672637c5bf05f328880f76898f526143d04c6af" - name: "DRIVER_MANAGER_IMAGE" diff --git a/config/samples/nvidia_v1alpha1_nvidiadriver.yaml b/config/samples/nvidia_v1alpha1_nvidiadriver.yaml index c49b2156c..9d5a90e63 100644 --- a/config/samples/nvidia_v1alpha1_nvidiadriver.yaml +++ b/config/samples/nvidia_v1alpha1_nvidiadriver.yaml @@ -8,7 +8,7 @@ spec: driverType: gpu repository: nvcr.io/nvidia image: driver - version: "535.154.05" + version: "550.54.15" imagePullPolicy: IfNotPresent imagePullSecrets: [] nodeSelector: {} diff --git a/deployments/gpu-operator/values.yaml b/deployments/gpu-operator/values.yaml index 85df8f4dd..27a0190fe 100644 --- a/deployments/gpu-operator/values.yaml +++ b/deployments/gpu-operator/values.yaml @@ -131,7 +131,7 @@ driver: 
usePrecompiled: false repository: nvcr.io/nvidia image: driver - version: "550.54.14" + version: "550.54.15" imagePullPolicy: IfNotPresent imagePullSecrets: [] startupProbe: diff --git a/tests/scripts/.definitions.sh b/tests/scripts/.definitions.sh index 5655c8a3d..0a6397f12 100644 --- a/tests/scripts/.definitions.sh +++ b/tests/scripts/.definitions.sh @@ -17,7 +17,7 @@ TERRAFORM="terraform -chdir=${TERRAFORM_DIR}" : ${LOG_DIR:="/tmp/logs"} : ${PROJECT:="$(basename "${PROJECT_DIR}")"} : ${TEST_NAMESPACE:="test-operator"} -: ${TARGET_DRIVER_VERSION:="535.104.05"} +: ${TARGET_DRIVER_VERSION:="535.161.08"} : ${OPERATOR_IMAGE:="nvcr.io/nvidia/gpu-operator"} From 34fa3d4844db4da717471d00a0dafa56d211c486 Mon Sep 17 00:00:00 2001 From: Tariq Ibrahim Date: Wed, 20 Mar 2024 17:50:11 -0700 Subject: [PATCH 009/293] bump container-toolkit version v1.15.0-rc.4 Signed-off-by: Tariq Ibrahim --- .../gpu-operator-certified.clusterserviceversion.yaml | 4 ++-- deployments/gpu-operator/values.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml index 7acf69274..5b142266b 100644 --- a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml +++ b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml @@ -207,7 +207,7 @@ spec: - name: dcgm-image image: nvcr.io/nvidia/cloud-native/dcgm@sha256:2142a46050b43da4d265697880d778fa14b0aab0395ccbe7c83662dac4cbc437 - name: container-toolkit-image - image: nvcr.io/nvidia/k8s/container-toolkit@sha256:59a3875e7a37eb370385e654184efa3a1b193c9ea352165818496b19cbe14aa4 + image: nvcr.io/nvidia/k8s/container-toolkit@sha256:821a5ce75cfcf24a64121840d5dbd2200d61b91e2549536052a5e3fb0059eea8 - name: driver-image image: nvcr.io/nvidia/driver@sha256:96f25c67e5b1072d5981080e12d65ec37eb9ef2fc0494499416aa801b0a34da3 - name: driver-image-535 @@ -895,7 +895,7 @@ spec: - name: "GFD_IMAGE" value: "nvcr.io/nvidia/k8s-device-plugin@sha256:aa92e05ab0906de97ae211366d010560d2a38d9933d7532f667fa5b6f8b4abca" - name: "CONTAINER_TOOLKIT_IMAGE" - value: "nvcr.io/nvidia/k8s/container-toolkit@sha256:59a3875e7a37eb370385e654184efa3a1b193c9ea352165818496b19cbe14aa4" + value: "nvcr.io/nvidia/k8s/container-toolkit@sha256:821a5ce75cfcf24a64121840d5dbd2200d61b91e2549536052a5e3fb0059eea8" - name: "DCGM_IMAGE" value: "nvcr.io/nvidia/cloud-native/dcgm@sha256:2142a46050b43da4d265697880d778fa14b0aab0395ccbe7c83662dac4cbc437" - name: "DCGM_EXPORTER_IMAGE" diff --git a/deployments/gpu-operator/values.yaml b/deployments/gpu-operator/values.yaml index 27a0190fe..c08c630ca 100644 --- a/deployments/gpu-operator/values.yaml +++ b/deployments/gpu-operator/values.yaml @@ -219,7 +219,7 @@ toolkit: enabled: true repository: nvcr.io/nvidia/k8s image: container-toolkit - version: v1.14.6-ubuntu20.04 + version: v1.15.0-rc.4-ubuntu20.04 imagePullPolicy: IfNotPresent imagePullSecrets: [] env: [] From dd537defb99734964ab547869b5afc8d77110ab9 Mon Sep 17 00:00:00 2001 From: Tariq Ibrahim Date: Tue, 26 Mar 2024 13:48:31 -0700 Subject: [PATCH 010/293] add configMap get/list privileges to the RBAC role Signed-off-by: Tariq Ibrahim --- assets/state-dcgm-exporter/0200_role.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/assets/state-dcgm-exporter/0200_role.yaml b/assets/state-dcgm-exporter/0200_role.yaml index 0e6d68768..f055a3b34 100644 --- a/assets/state-dcgm-exporter/0200_role.yaml +++ b/assets/state-dcgm-exporter/0200_role.yaml @@ -15,6 +15,7 @@ rules: 
- apiGroups: - "" resources: + - configmaps - pods verbs: - get From ea3f4f5b38b4169eb72432d714e4f2a33e3a4a0b Mon Sep 17 00:00:00 2001 From: Tariq Ibrahim Date: Thu, 4 Apr 2024 10:42:30 -0700 Subject: [PATCH 011/293] add Scheme only during the init phase Signed-off-by: Tariq Ibrahim --- cmd/gpu-operator/main.go | 10 +- controllers/object_controls.go | 362 ++++++++++++++-------------- controllers/object_controls_test.go | 17 +- controllers/resource_manager.go | 8 +- controllers/state_manager.go | 116 +++++---- 5 files changed, 255 insertions(+), 258 deletions(-) diff --git a/cmd/gpu-operator/main.go b/cmd/gpu-operator/main.go index 4f55e46ec..3bca41c5b 100644 --- a/cmd/gpu-operator/main.go +++ b/cmd/gpu-operator/main.go @@ -28,6 +28,10 @@ import ( "go.uber.org/zap/zapcore" _ "k8s.io/client-go/plugin/pkg/client/auth" + apiconfigv1 "github.com/openshift/api/config/v1" + apiimagev1 "github.com/openshift/api/image/v1" + secv1 "github.com/openshift/api/security/v1" + promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" @@ -56,11 +60,13 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(clusterpolicyv1.AddToScheme(scheme)) utilruntime.Must(apiextensionsv1.AddToScheme(scheme)) utilruntime.Must(nvidiav1alpha1.AddToScheme(scheme)) - // +kubebuilder:scaffold:scheme + utilruntime.Must(promv1.AddToScheme(scheme)) + utilruntime.Must(secv1.Install(scheme)) + utilruntime.Must(apiconfigv1.Install(scheme)) + utilruntime.Must(apiimagev1.Install(scheme)) } func main() { diff --git a/controllers/object_controls.go b/controllers/object_controls.go index 8384c8eb0..9b149547f 100644 --- a/controllers/object_controls.go +++ b/controllers/object_controls.go @@ -280,11 +280,11 @@ func ServiceAccount(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].ServiceAccount.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("ServiceAccount", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("ServiceAccount", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -292,11 +292,11 @@ func ServiceAccount(n ClusterPolicyController) (gpuv1.State, error) { return gpuv1.Disabled, nil } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } - if err := n.rec.Client.Create(ctx, obj); err != nil { + if err := n.client.Create(ctx, obj); err != nil { if apierrors.IsAlreadyExists(err) { logger.Info("Found Resource, skipping update") return gpuv1.Ready, nil @@ -315,11 +315,11 @@ func Role(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].Role.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("Role", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("Role", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := 
n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -327,14 +327,14 @@ func Role(n ClusterPolicyController) (gpuv1.State, error) { return gpuv1.Disabled, nil } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } - if err := n.rec.Client.Create(ctx, obj); err != nil { + if err := n.client.Create(ctx, obj); err != nil { if apierrors.IsAlreadyExists(err) { logger.Info("Found Resource, updating...") - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -356,11 +356,11 @@ func RoleBinding(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].RoleBinding.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("RoleBinding", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("RoleBinding", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -378,14 +378,14 @@ func RoleBinding(n ClusterPolicyController) (gpuv1.State, error) { obj.Subjects[idx].Namespace = n.operatorNamespace } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } - if err := n.rec.Client.Create(ctx, obj); err != nil { + if err := n.client.Create(ctx, obj); err != nil { if apierrors.IsAlreadyExists(err) { logger.Info("Found Resource, updating...") - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -407,11 +407,11 @@ func ClusterRole(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].ClusterRole.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("ClusterRole", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("ClusterRole", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -419,14 +419,14 @@ func ClusterRole(n ClusterPolicyController) (gpuv1.State, error) { return gpuv1.Disabled, nil } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } - if err := n.rec.Client.Create(ctx, obj); err != nil { + if err := n.client.Create(ctx, obj); err != nil { if apierrors.IsAlreadyExists(err) { logger.Info("Found Resource, updating...") - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -448,11 +448,11 @@ func ClusterRoleBinding(n 
ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].ClusterRoleBinding.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("ClusterRoleBinding", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("ClusterRoleBinding", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -464,14 +464,14 @@ func ClusterRoleBinding(n ClusterPolicyController) (gpuv1.State, error) { obj.Subjects[idx].Namespace = n.operatorNamespace } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } - if err := n.rec.Client.Create(ctx, obj); err != nil { + if err := n.client.Create(ctx, obj); err != nil { if apierrors.IsAlreadyExists(err) { logger.Info("Found Resource, updating...") - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -494,11 +494,11 @@ func createConfigMap(n ClusterPolicyController, configMapIdx int) (gpuv1.State, obj := n.resources[state].ConfigMaps[configMapIdx].DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("ConfigMap", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("ConfigMap", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -540,18 +540,18 @@ func createConfigMap(n ClusterPolicyController, configMapIdx int) (gpuv1.State, } } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } - if err := n.rec.Client.Create(ctx, obj); err != nil { + if err := n.client.Create(ctx, obj); err != nil { if !apierrors.IsAlreadyExists(err) { logger.Info("Couldn't create", "Error", err) return gpuv1.NotReady, err } logger.Info("Found Resource, updating...") - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -581,7 +581,7 @@ func ConfigMaps(n ClusterPolicyController) (gpuv1.State, error) { func (n ClusterPolicyController) getKernelVersionsMap() (map[string]string, error) { kernelVersionMap := make(map[string]string) ctx := n.ctx - logger := n.rec.Log.WithValues("Request.Namespace", "default", "Request.Name", "Node") + logger := n.logger.WithValues("Request.Namespace", "default", "Request.Name", "Node") // Filter only GPU nodes opts := []client.ListOption{ @@ -589,7 +589,7 @@ func (n ClusterPolicyController) getKernelVersionsMap() (map[string]string, erro } list := &corev1.NodeList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) 
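// Editor's note: every hunk in this refactor applies the same mechanical
// substitution (n.rec.Client becomes n.client, n.rec.Log becomes n.logger,
// n.rec.Scheme becomes n.scheme), so the controller no longer reaches through
// the reconciler for its dependencies. A minimal sketch of the fields this
// relies on, mirroring the state_manager.go hunk later in this patch:
//
//	type ClusterPolicyController struct {
//		client client.Client   // was n.rec.Client
//		logger logr.Logger     // was n.rec.Log
//		scheme *runtime.Scheme // was n.rec.Scheme
//		// ...remaining fields unchanged...
//	}
//
// Dropping the *ClusterPolicyReconciler back-pointer keeps the controller
// self-contained and lets tests construct it without building a reconciler.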
if err != nil { logger.Info("Could not get NodeList", "ERROR", err) return nil, err @@ -629,14 +629,14 @@ func (n ClusterPolicyController) getKernelVersionsMap() (map[string]string, erro func kernelFullVersion(n ClusterPolicyController) (string, string, string) { ctx := n.ctx - logger := n.rec.Log.WithValues("Request.Namespace", "default", "Request.Name", "Node") + logger := n.logger.WithValues("Request.Namespace", "default", "Request.Name", "Node") // We need the node labels to fetch the correct container opts := []client.ListOption{ client.MatchingLabels{"nvidia.com/gpu.present": "true"}, } list := &corev1.NodeList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) if err != nil { logger.Info("Could not get NodeList", "ERROR", err) return "", "", "" @@ -678,7 +678,7 @@ func kernelFullVersion(n ClusterPolicyController) (string, string, string) { } func preProcessDaemonSet(obj *appsv1.DaemonSet, n ClusterPolicyController) error { - logger := n.rec.Log.WithValues("Daemonset", obj.Name) + logger := n.logger.WithValues("Daemonset", obj.Name) transformations := map[string]func(*appsv1.DaemonSet, *gpuv1.ClusterPolicySpec, ClusterPolicyController) error{ "nvidia-driver-daemonset": TransformDriver, "nvidia-vgpu-manager-daemonset": TransformVGPUManager, @@ -1020,17 +1020,17 @@ func getOrCreateTrustedCAConfigMap(n ClusterPolicyController, name string) (*cor configMap.ObjectMeta.Labels = make(map[string]string) configMap.ObjectMeta.Labels["config.openshift.io/inject-trusted-cabundle"] = "true" - logger := n.rec.Log.WithValues("ConfigMap", configMap.ObjectMeta.Name, "Namespace", configMap.ObjectMeta.Namespace) + logger := n.logger.WithValues("ConfigMap", configMap.ObjectMeta.Name, "Namespace", configMap.ObjectMeta.Namespace) - if err := controllerutil.SetControllerReference(n.singleton, configMap, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, configMap, n.scheme); err != nil { return nil, err } found := &corev1.ConfigMap{} - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: configMap.ObjectMeta.Namespace, Name: configMap.ObjectMeta.Name}, found) + err := n.client.Get(ctx, types.NamespacedName{Namespace: configMap.ObjectMeta.Namespace, Name: configMap.ObjectMeta.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("Not found, creating") - err = n.rec.Client.Create(ctx, configMap) + err = n.client.Create(ctx, configMap) if err != nil { logger.Info("Couldn't create") return nil, fmt.Errorf("failed to create trusted CA bundle config map %q: %s", name, err) @@ -1983,7 +1983,7 @@ func TransformValidator(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpec, } if validatorErr != nil { - n.rec.Log.Info("WARN: errors transforming the validator containers: %v", validatorErr) + n.logger.Info("WARN: errors transforming the validator containers: %v", validatorErr) } return nil @@ -2012,7 +2012,7 @@ func TransformSandboxValidator(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolic } if validatorErr != nil { - n.rec.Log.Info("WARN: errors transforming the validator containers: %v", validatorErr) + n.logger.Info("WARN: errors transforming the validator containers: %v", validatorErr) } return nil @@ -2575,7 +2575,7 @@ func transformGDSContainer(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicySpe continue } if config.GPUDirectStorage == nil || !config.GPUDirectStorage.IsEnabled() { - n.rec.Log.Info("GPUDirect Storage is disabled") + n.logger.Info("GPUDirect Storage is disabled") // remove nvidia-fs 
sidecar container from driver Daemonset if GDS is not enabled obj.Spec.Template.Spec.Containers = append(obj.Spec.Template.Spec.Containers[:i], obj.Spec.Template.Spec.Containers[i+1:]...) return nil @@ -2657,7 +2657,7 @@ func transformGDRCopyContainer(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolic continue } if config.GDRCopy == nil || !config.GDRCopy.IsEnabled() { - n.rec.Log.Info("GDRCopy is disabled") + n.logger.Info("GDRCopy is disabled") // remove nvidia-gdrcopy sidecar container from driver Daemonset if gdrcopy is not enabled obj.Spec.Template.Spec.Containers = append(obj.Spec.Template.Spec.Containers[:i], obj.Spec.Template.Spec.Containers[i+1:]...) return nil @@ -2781,7 +2781,7 @@ func transformOpenShiftDriverToolkitContainer(obj *appsv1.DaemonSet, config *gpu if !n.ocpDriverToolkit.enabled { if n.ocpDriverToolkit.requested { - n.rec.Log.Info("OpenShift DriverToolkit was requested but could not be enabled (dependencies missing)") + n.logger.Info("OpenShift DriverToolkit was requested but could not be enabled (dependencies missing)") } /* remove OpenShift Driver Toolkit side-car container from the Driver DaemonSet */ @@ -2821,18 +2821,18 @@ func transformOpenShiftDriverToolkitContainer(obj *appsv1.DaemonSet, config *gpu if config.GPUDirectStorage != nil && config.GPUDirectStorage.IsEnabled() { setContainerEnv(driverToolkitContainer, "GDS_ENABLED", "true") - n.rec.Log.V(2).Info("transformOpenShiftDriverToolkitContainer", "GDS_ENABLED", config.GPUDirectStorage.IsEnabled()) + n.logger.V(2).Info("transformOpenShiftDriverToolkitContainer", "GDS_ENABLED", config.GPUDirectStorage.IsEnabled()) } if config.GDRCopy != nil && config.GDRCopy.IsEnabled() { setContainerEnv(driverToolkitContainer, "GDRCOPY_ENABLED", "true") - n.rec.Log.V(2).Info("transformOpenShiftDriverToolkitContainer", "GDRCOPY_ENABLED", "true") + n.logger.V(2).Info("transformOpenShiftDriverToolkitContainer", "GDRCOPY_ENABLED", "true") } image := n.ocpDriverToolkit.rhcosDriverToolkitImages[n.ocpDriverToolkit.currentRhcosVersion] if image != "" { driverToolkitContainer.Image = image - n.rec.Log.Info("DriverToolkit", "image", driverToolkitContainer.Image) + n.logger.Info("DriverToolkit", "image", driverToolkitContainer.Image) } else { /* RHCOS tag missing in the Driver-Toolkit imagestream, setup fallback */ obj.ObjectMeta.Labels["openshift.driver-toolkit.rhcos-image-missing"] = "true" @@ -2843,7 +2843,7 @@ func transformOpenShiftDriverToolkitContainer(obj *appsv1.DaemonSet, config *gpu setContainerEnv(mainContainer, "RHCOS_VERSION", rhcosVersion) setContainerEnv(driverToolkitContainer, "RHCOS_IMAGE_MISSING", "true") - n.rec.Log.Info("WARNING: DriverToolkit image tag missing. Version-specific fallback mode enabled.", "rhcosVersion", rhcosVersion) + n.logger.Info("WARNING: DriverToolkit image tag missing. 
Version-specific fallback mode enabled.", "rhcosVersion", rhcosVersion) } /* prepare the main container to start from the DriverToolkit entrypoint */ @@ -2998,7 +2998,7 @@ func createConfigMapVolumeMounts(n ClusterPolicyController, configMapName string // get the ConfigMap cm := &corev1.ConfigMap{} opts := client.ObjectKey{Namespace: n.operatorNamespace, Name: configMapName} - err := n.rec.Client.Get(ctx, opts, cm) + err := n.client.Get(ctx, opts, cm) if err != nil { return nil, nil, fmt.Errorf("ERROR: could not get ConfigMap %s from client: %v", configMapName, err) } @@ -3233,7 +3233,7 @@ func transformDriverContainer(obj *appsv1.DaemonSet, config *gpuv1.ClusterPolicy // set up subscription entitlements for RHEL(using K8s with a non-CRIO runtime) and SLES if (release["ID"] == "rhel" && n.openshift == "" && n.runtime != gpuv1.CRIO) || release["ID"] == "sles" { - n.rec.Log.Info("Mounting subscriptions into the driver container", "OS", release["ID"]) + n.logger.Info("Mounting subscriptions into the driver container", "OS", release["ID"]) pathToVolumeSource, err := getSubscriptionPathsToVolumeSources() if err != nil { return fmt.Errorf("ERROR: failed to get path items for subscription entitlements: %v", err) @@ -3431,19 +3431,19 @@ func isDeploymentReady(name string, n ClusterPolicyController) gpuv1.State { opts := []client.ListOption{ client.MatchingLabels{"app": name}, } - n.rec.Log.V(1).Info("Deployment", "LabelSelector", fmt.Sprintf("app=%s", name)) + n.logger.V(1).Info("Deployment", "LabelSelector", fmt.Sprintf("app=%s", name)) list := &appsv1.DeploymentList{} - err := n.rec.Client.List(n.ctx, list, opts...) + err := n.client.List(n.ctx, list, opts...) if err != nil { - n.rec.Log.Info("Could not get DeploymentList", err) + n.logger.Info("Could not get DeploymentList", err) } - n.rec.Log.V(1).Info("Deployment", "NumberOfDeployment", len(list.Items)) + n.logger.V(1).Info("Deployment", "NumberOfDeployment", len(list.Items)) if len(list.Items) == 0 { return gpuv1.NotReady } ds := list.Items[0] - n.rec.Log.V(1).Info("Deployment", "NumberUnavailable", ds.Status.UnavailableReplicas) + n.logger.V(1).Info("Deployment", "NumberUnavailable", ds.Status.UnavailableReplicas) if ds.Status.UnavailableReplicas != 0 { return gpuv1.NotReady @@ -3455,19 +3455,19 @@ func isDeploymentReady(name string, n ClusterPolicyController) gpuv1.State { func isDaemonSetReady(name string, n ClusterPolicyController) gpuv1.State { ctx := n.ctx ds := &appsv1.DaemonSet{} - n.rec.Log.V(2).Info("checking daemonset for readiness", "name", name) - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: n.operatorNamespace, Name: name}, ds) + n.logger.V(2).Info("checking daemonset for readiness", "name", name) + err := n.client.Get(ctx, types.NamespacedName{Namespace: n.operatorNamespace, Name: name}, ds) if err != nil { - n.rec.Log.Error(err, "could not get daemonset", "name", name) + n.logger.Error(err, "could not get daemonset", "name", name) } if ds.Status.DesiredNumberScheduled == 0 { - n.rec.Log.V(2).Info("Daemonset has desired pods of 0", "name", name) + n.logger.V(2).Info("Daemonset has desired pods of 0", "name", name) return gpuv1.Ready } if ds.Status.NumberUnavailable != 0 { - n.rec.Log.Info("daemonset not ready", "name", name) + n.logger.Info("daemonset not ready", "name", name) return gpuv1.NotReady } @@ -3478,14 +3478,14 @@ func isDaemonSetReady(name string, n ClusterPolicyController) gpuv1.State { opts := []client.ListOption{client.MatchingLabels(ds.Spec.Template.ObjectMeta.Labels)} - 
n.rec.Log.V(2).Info("Pod", "LabelSelector", fmt.Sprintf("app=%s", name)) + n.logger.V(2).Info("Pod", "LabelSelector", fmt.Sprintf("app=%s", name)) list := &corev1.PodList{} - err = n.rec.Client.List(ctx, list, opts...) + err = n.client.List(ctx, list, opts...) if err != nil { - n.rec.Log.Info("Could not get PodList", err) + n.logger.Info("Could not get PodList", err) return gpuv1.NotReady } - n.rec.Log.V(2).Info("Pod", "NumberOfPods", len(list.Items)) + n.logger.V(2).Info("Pod", "NumberOfPods", len(list.Items)) if len(list.Items) == 0 { return gpuv1.NotReady } @@ -3493,21 +3493,21 @@ func isDaemonSetReady(name string, n ClusterPolicyController) gpuv1.State { dsPods := getPodsOwnedbyDaemonset(ds, list.Items, n) daemonsetRevisionHash, err := getDaemonsetControllerRevisionHash(ctx, ds, n) if err != nil { - n.rec.Log.Error( + n.logger.Error( err, "Failed to get daemonset template revision hash", "daemonset", ds) return gpuv1.NotReady } - n.rec.Log.V(2).Info("daemonset template revision hash", "hash", daemonsetRevisionHash) + n.logger.V(2).Info("daemonset template revision hash", "hash", daemonsetRevisionHash) for _, pod := range dsPods { pod := pod podRevisionHash, err := getPodControllerRevisionHash(ctx, &pod) if err != nil { - n.rec.Log.Error( + n.logger.Error( err, "Failed to get pod template revision hash", "pod", pod) return gpuv1.NotReady } - n.rec.Log.V(2).Info("pod template revision hash", "hash", podRevisionHash) + n.logger.V(2).Info("pod template revision hash", "hash", podRevisionHash) // check if the revision hashes are matching and pod is in running state if podRevisionHash != daemonsetRevisionHash || pod.Status.Phase != "Running" { @@ -3534,13 +3534,13 @@ func getPodsOwnedbyDaemonset(ds *appsv1.DaemonSet, pods []corev1.Pod, n ClusterP dsPodList := []corev1.Pod{} for _, pod := range pods { if pod.OwnerReferences == nil || len(pod.OwnerReferences) < 1 { - n.rec.Log.Info("Driver Pod has no owner DaemonSet", "pod", pod.Name) + n.logger.Info("Driver Pod has no owner DaemonSet", "pod", pod.Name) continue } - n.rec.Log.V(2).Info("Pod", "pod", pod.Name, "owner", pod.OwnerReferences[0].Name) + n.logger.V(2).Info("Pod", "pod", pod.Name, "owner", pod.OwnerReferences[0].Name) if ds.UID != pod.OwnerReferences[0].UID { - n.rec.Log.Info("Driver Pod is not owned by a Driver DaemonSet", + n.logger.Info("Driver Pod is not owned by a Driver DaemonSet", "pod", pod, "actual owner", pod.OwnerReferences[0]) continue } @@ -3564,12 +3564,12 @@ func getDaemonsetControllerRevisionHash(ctx context.Context, daemonset *appsv1.D client.InNamespace(n.operatorNamespace), } list := &appsv1.ControllerRevisionList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) 
if err != nil { return "", fmt.Errorf("error getting controller revision list for daemonset %s: %v", daemonset.Name, err) } - n.rec.Log.V(2).Info("obtained controller revisions", "Daemonset", daemonset.Name, "len", len(list.Items)) + n.logger.V(2).Info("obtained controller revisions", "Daemonset", daemonset.Name, "len", len(list.Items)) var revisions []appsv1.ControllerRevision for _, controllerRevision := range list.Items { @@ -3598,11 +3598,11 @@ func Deployment(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].Deployment.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("Deployment", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("Deployment", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -3610,14 +3610,14 @@ func Deployment(n ClusterPolicyController) (gpuv1.State, error) { return gpuv1.Disabled, nil } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } - if err := n.rec.Client.Create(ctx, obj); err != nil { + if err := n.client.Create(ctx, obj); err != nil { if apierrors.IsAlreadyExists(err) { logger.Info("Found Resource, updating...") - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -3637,21 +3637,21 @@ func ocpHasDriverToolkitImageStream(n *ClusterPolicyController) (bool, error) { found := &apiimagev1.ImageStream{} name := "driver-toolkit" namespace := "openshift" - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, found) + err := n.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, found) if err != nil { if apierrors.IsNotFound(err) { - n.rec.Log.Info("ocpHasDriverToolkitImageStream: driver-toolkit imagestream not found", + n.logger.Info("ocpHasDriverToolkitImageStream: driver-toolkit imagestream not found", "Name", name, "Namespace", namespace) return false, nil } - n.rec.Log.Info("Couldn't get the driver-toolkit imagestream", "Error", err) + n.logger.Info("Couldn't get the driver-toolkit imagestream", "Error", err) return false, err } - n.rec.Log.V(1).Info("ocpHasDriverToolkitImageStream: driver-toolkit imagestream found") + n.logger.V(1).Info("ocpHasDriverToolkitImageStream: driver-toolkit imagestream found") isBroken := false for _, tag := range found.Spec.Tags { if tag.Name == "" { @@ -3661,11 +3661,11 @@ func ocpHasDriverToolkitImageStream(n *ClusterPolicyController) (bool, error) { if tag.Name == "latest" || tag.From == nil { continue } - n.rec.Log.V(1).Info("ocpHasDriverToolkitImageStream: tag", tag.Name, tag.From.Name) + n.logger.V(1).Info("ocpHasDriverToolkitImageStream: tag", tag.Name, tag.From.Name) n.ocpDriverToolkit.rhcosDriverToolkitImages[tag.Name] = tag.From.Name } if isBroken { - n.rec.Log.Info("WARNING: ocpHasDriverToolkitImageStream: driver-toolkit imagestream is broken, see RHBZ#2015024") + n.logger.Info("WARNING: ocpHasDriverToolkitImageStream: driver-toolkit imagestream is broken, see RHBZ#2015024") n.operatorMetrics.openshiftDriverToolkitIsBroken.Set(1) } else { @@ 
-3682,7 +3682,7 @@ func (n ClusterPolicyController) cleanupAllDriverDaemonSets(ctx context.Context) // is allowed when specifying ListOptions or DeleteOptions. // See GH issue: https://github.com/kubernetes-sigs/controller-runtime/issues/612 list := &appsv1.DaemonSetList{} - err := n.rec.Client.List(ctx, list, client.MatchingFields{clusterPolicyControllerIndexKey: n.singleton.Name}) + err := n.client.List(ctx, list, client.MatchingFields{clusterPolicyControllerIndexKey: n.singleton.Name}) if err != nil { return fmt.Errorf("failed to list all NVIDIA driver daemonsets owned by ClusterPolicy: %w", err) } @@ -3691,8 +3691,8 @@ func (n ClusterPolicyController) cleanupAllDriverDaemonSets(ctx context.Context) ds := ds // filter out DaemonSets which are not the NVIDIA driver/vgpu-manager if strings.HasPrefix(ds.Name, commonDriverDaemonsetName) || strings.HasPrefix(ds.Name, commonVGPUManagerDaemonsetName) { - n.rec.Log.Info("Deleting NVIDIA driver daemonset owned by ClusterPolicy", "Name", ds.Name) - err = n.rec.Client.Delete(ctx, &ds) + n.logger.Info("Deleting NVIDIA driver daemonset owned by ClusterPolicy", "Name", ds.Name) + err = n.client.Delete(ctx, &ds) if err != nil { return fmt.Errorf("error deleting NVIDIA driver daemonset: %w", err) } @@ -3712,9 +3712,9 @@ func (n ClusterPolicyController) cleanupStalePrecompiledDaemonsets(ctx context.C }, } list := &appsv1.DaemonSetList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) if err != nil { - n.rec.Log.Error(err, "could not get daemonset list") + n.logger.Error(err, "could not get daemonset list") return err } @@ -3722,21 +3722,21 @@ func (n ClusterPolicyController) cleanupStalePrecompiledDaemonsets(ctx context.C name := list.Items[idx].ObjectMeta.Name desiredNumberScheduled := list.Items[idx].Status.DesiredNumberScheduled - n.rec.Log.V(1).Info("Driver DaemonSet found", + n.logger.V(1).Info("Driver DaemonSet found", "Name", name, "desiredNumberScheduled", desiredNumberScheduled) if desiredNumberScheduled != 0 { - n.rec.Log.Info("Driver DaemonSet active, keep it.", + n.logger.Info("Driver DaemonSet active, keep it.", "Name", name, "Status.DesiredNumberScheduled", desiredNumberScheduled) continue } - n.rec.Log.Info("Delete Driver DaemonSet", "Name", name) + n.logger.Info("Delete Driver DaemonSet", "Name", name) - err = n.rec.Client.Delete(ctx, &list.Items[idx]) + err = n.client.Delete(ctx, &list.Items[idx]) if err != nil { - n.rec.Log.Info("ERROR: Could not get delete DaemonSet", + n.logger.Info("ERROR: Could not get delete DaemonSet", "Name", name, "Error", err) } } @@ -3750,23 +3750,23 @@ func (n ClusterPolicyController) cleanupStalePrecompiledDaemonsets(ctx context.C func precompiledDriverDaemonsets(ctx context.Context, n ClusterPolicyController) (gpuv1.State, []error) { overallState := gpuv1.Ready var errs []error - n.rec.Log.Info("cleaning any stale precompiled driver daemonsets") + n.logger.Info("cleaning any stale precompiled driver daemonsets") err := n.cleanupStalePrecompiledDaemonsets(ctx) if err != nil { return gpuv1.NotReady, append(errs, err) } - n.rec.Log.V(1).Info("preparing pre-compiled driver daemonsets") + n.logger.V(1).Info("preparing pre-compiled driver daemonsets") for kernelVersion, os := range n.kernelVersionMap { // set current kernel version n.currentKernelVersion = kernelVersion - n.rec.Log.Info("preparing pre-compiled driver daemonset", + n.logger.Info("preparing pre-compiled driver daemonset", "version", n.currentKernelVersion, "os", os) state, err := DaemonSet(n) if state != 
gpuv1.Ready { - n.rec.Log.Info("pre-compiled driver daemonset not ready", + n.logger.Info("pre-compiled driver daemonset not ready", "version", n.currentKernelVersion, "state", state) overallState = state } @@ -3790,7 +3790,7 @@ func (n ClusterPolicyController) ocpDriverToolkitDaemonSets(ctx context.Context) return gpuv1.NotReady, err } - n.rec.Log.V(1).Info("preparing DriverToolkit DaemonSet", + n.logger.V(1).Info("preparing DriverToolkit DaemonSet", "rhcos", n.ocpDriverToolkit.rhcosVersions) overallState := gpuv1.Ready @@ -3799,12 +3799,12 @@ func (n ClusterPolicyController) ocpDriverToolkitDaemonSets(ctx context.Context) for rhcosVersion := range n.ocpDriverToolkit.rhcosVersions { n.ocpDriverToolkit.currentRhcosVersion = rhcosVersion - n.rec.Log.V(1).Info("preparing DriverToolkit DaemonSet", + n.logger.V(1).Info("preparing DriverToolkit DaemonSet", "rhcosVersion", n.ocpDriverToolkit.currentRhcosVersion) state, err := DaemonSet(n) - n.rec.Log.V(1).Info("preparing DriverToolkit DaemonSet", + n.logger.V(1).Info("preparing DriverToolkit DaemonSet", "rhcosVersion", n.ocpDriverToolkit.currentRhcosVersion, "state", state) if state != gpuv1.Ready { overallState = state @@ -3825,7 +3825,7 @@ func (n ClusterPolicyController) ocpDriverToolkitDaemonSets(ctx context.Context) if image != "" { continue } - n.rec.Log.Info("WARNINGs: RHCOS driver-toolkit image missing. Version-specific fallback mode enabled.", "rhcosVersion", rhcosVersion) + n.logger.Info("WARNINGs: RHCOS driver-toolkit image missing. Version-specific fallback mode enabled.", "rhcosVersion", rhcosVersion) tagsMissing = true } if tagsMissing { @@ -3852,9 +3852,9 @@ func (n ClusterPolicyController) ocpCleanupStaleDriverToolkitDaemonSets(ctx cont } list := &appsv1.DaemonSetList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) 
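// Editor's note: the loop below garbage-collects stale DriverToolkit
// DaemonSets. Anything that still has desired pods scheduled is kept;
// otherwise a DaemonSet is deleted once its RHCOS version label no longer
// matches a version present in the cluster.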
if err != nil { - n.rec.Log.Info("ERROR: Could not get DaemonSetList", "Error", err) + n.logger.Info("ERROR: Could not get DaemonSetList", "Error", err) return err } @@ -3864,30 +3864,30 @@ func (n ClusterPolicyController) ocpCleanupStaleDriverToolkitDaemonSets(ctx cont clusterHasRhcosVersion, clusterOk := n.ocpDriverToolkit.rhcosVersions[dsRhcosVersion] desiredNumberScheduled := list.Items[idx].Status.DesiredNumberScheduled - n.rec.Log.V(1).Info("Driver DaemonSet found", + n.logger.V(1).Info("Driver DaemonSet found", "Name", name, "dsRhcosVersion", dsRhcosVersion, "clusterHasRhcosVersion", clusterHasRhcosVersion, "desiredNumberScheduled", desiredNumberScheduled) if desiredNumberScheduled != 0 { - n.rec.Log.Info("Driver DaemonSet active, keep it.", + n.logger.Info("Driver DaemonSet active, keep it.", "Name", name, "Status.DesiredNumberScheduled", desiredNumberScheduled) continue } if !versionOk { - n.rec.Log.Info("WARNING: Driver DaemonSet doesn't have DriverToolkit version label", + n.logger.Info("WARNING: Driver DaemonSet doesn't have DriverToolkit version label", "Name", name, "Label", ocpDriverToolkitVersionLabel, ) } else { switch { case !clusterOk: - n.rec.Log.V(1).Info("Driver DaemonSet RHCOS version NOT part of the cluster", + n.logger.V(1).Info("Driver DaemonSet RHCOS version NOT part of the cluster", "Name", name, "RHCOS version", dsRhcosVersion, ) case clusterHasRhcosVersion: - n.rec.Log.V(1).Info("Driver DaemonSet RHCOS version is part of the cluster, keep it.", + n.logger.V(1).Info("Driver DaemonSet RHCOS version is part of the cluster, keep it.", "Name", name, "RHCOS version", dsRhcosVersion, ) @@ -3897,16 +3897,16 @@ func (n ClusterPolicyController) ocpCleanupStaleDriverToolkitDaemonSets(ctx cont continue default: /* clusterHasRhcosVersion == false */ // currently unexpected - n.rec.Log.V(1).Info("Driver DaemonSet RHCOS version marked for deletion", + n.logger.V(1).Info("Driver DaemonSet RHCOS version marked for deletion", "Name", name, "RHCOS version", dsRhcosVersion, ) } } - n.rec.Log.Info("Delete Driver DaemonSet", "Name", name) - err = n.rec.Client.Delete(ctx, &list.Items[idx]) + n.logger.Info("Delete Driver DaemonSet", "Name", name) + err = n.client.Delete(ctx, &list.Items[idx]) if err != nil { - n.rec.Log.Info("ERROR: Could not get delete DaemonSet", + n.logger.Info("ERROR: Could not get delete DaemonSet", "Name", name, "Error", err) return err } @@ -4035,22 +4035,22 @@ func (n ClusterPolicyController) cleanupDriverDaemonsets(ctx context.Context, se var opts = []client.ListOption{client.MatchingLabels{searchKey: searchValue}} dsList := &appsv1.DaemonSetList{} - if err := n.rec.Client.List(ctx, dsList, opts...); err != nil { - n.rec.Log.Error(err, "Could not get DaemonSetList") + if err := n.client.List(ctx, dsList, opts...); err != nil { + n.logger.Error(err, "Could not get DaemonSetList") return 0, err } var lastErr error for idx := range dsList.Items { - n.rec.Log.Info("Delete DaemonSet", + n.logger.Info("Delete DaemonSet", "Name", dsList.Items[idx].ObjectMeta.Name, ) // ignore daemonsets that doesn't match the required name if !strings.HasPrefix(dsList.Items[idx].ObjectMeta.Name, namePrefix) { continue } - if err := n.rec.Client.Delete(ctx, &dsList.Items[idx]); err != nil { - n.rec.Log.Error(err, "Could not get delete DaemonSet", + if err := n.client.Delete(ctx, &dsList.Items[idx]); err != nil { + n.logger.Error(err, "Could not get delete DaemonSet", "Name", dsList.Items[idx].ObjectMeta.Name) lastErr = err } @@ -4062,8 +4062,8 @@ func (n ClusterPolicyController) 
cleanupDriverDaemonsets(ctx context.Context, se } podList := &corev1.PodList{} - if err := n.rec.Client.List(ctx, podList, opts...); err != nil { - n.rec.Log.Info("ERROR: Could not get PodList", "Error", err) + if err := n.client.List(ctx, podList, opts...); err != nil { + n.logger.Info("ERROR: Could not get PodList", "Error", err) return 0, err } @@ -4085,11 +4085,11 @@ func DaemonSet(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].DaemonSet.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("DaemonSet", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("DaemonSet", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -4163,7 +4163,7 @@ func DaemonSet(n ClusterPolicyController) (gpuv1.State, error) { return gpuv1.NotReady, err } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { logger.Info("SetControllerReference failed", "Error", err) return gpuv1.NotReady, err } @@ -4186,7 +4186,7 @@ func DaemonSet(n ClusterPolicyController) (gpuv1.State, error) { } found := &appsv1.DaemonSet{} - err = n.rec.Client.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, found) + err = n.client.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("DaemonSet not found, creating", "Name", obj.Name, @@ -4195,7 +4195,7 @@ func DaemonSet(n ClusterPolicyController) (gpuv1.State, error) { hashStr := getDaemonsetHash(obj) // add annotation to the Daemonset with hash value during creation obj.Annotations[NvidiaAnnotationHashKey] = hashStr - err = n.rec.Client.Create(ctx, obj) + err = n.client.Create(ctx, obj) if err != nil { logger.Info("Couldn't create DaemonSet", "Name", obj.Name, @@ -4214,7 +4214,7 @@ func DaemonSet(n ClusterPolicyController) (gpuv1.State, error) { changed := isDaemonsetSpecChanged(found, obj) if changed { logger.Info("DaemonSet is different, updating", "name", obj.ObjectMeta.Name) - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { return gpuv1.NotReady, err } @@ -4277,13 +4277,13 @@ func isPodReady(name string, n ClusterPolicyController, phase corev1.PodPhase) g ctx := n.ctx opts := []client.ListOption{&client.MatchingLabels{"app": name}} - n.rec.Log.V(1).Info("Pod", "LabelSelector", fmt.Sprintf("app=%s", name)) + n.logger.V(1).Info("Pod", "LabelSelector", fmt.Sprintf("app=%s", name)) list := &corev1.PodList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) 
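// Editor's note: isPodReady samples a single pod. It takes the first item
// returned for the app=<name> selector (list.Items[0] below) and compares its
// phase against the expected one, rather than waiting on every replica.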
if err != nil { - n.rec.Log.Info("Could not get PodList", err) + n.logger.Info("Could not get PodList", err) } - n.rec.Log.V(1).Info("Pod", "NumberOfPods", len(list.Items)) + n.logger.V(1).Info("Pod", "NumberOfPods", len(list.Items)) if len(list.Items) == 0 { return gpuv1.NotReady } @@ -4291,10 +4291,10 @@ func isPodReady(name string, n ClusterPolicyController, phase corev1.PodPhase) g pd := list.Items[0] if pd.Status.Phase != phase { - n.rec.Log.V(1).Info("Pod", "Phase", pd.Status.Phase, "!=", phase) + n.logger.V(1).Info("Pod", "Phase", pd.Status.Phase, "!=", phase) return gpuv1.NotReady } - n.rec.Log.V(1).Info("Pod", "Phase", pd.Status.Phase, "==", phase) + n.logger.V(1).Info("Pod", "Phase", pd.Status.Phase, "==", phase) return gpuv1.Ready } @@ -4305,11 +4305,11 @@ func SecurityContextConstraints(n ClusterPolicyController) (gpuv1.State, error) obj := n.resources[state].SecurityContextConstraints.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("SecurityContextConstraints", obj.Name, "Namespace", "default") + logger := n.logger.WithValues("SecurityContextConstraints", obj.Name, "Namespace", "default") // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -4329,15 +4329,15 @@ func SecurityContextConstraints(n ClusterPolicyController) (gpuv1.State, error) obj.AllowHostNetwork = true } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } found := &secv1.SecurityContextConstraints{} - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: "", Name: obj.Name}, found) + err := n.client.Get(ctx, types.NamespacedName{Namespace: "", Name: obj.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("Not found, creating...") - err = n.rec.Client.Create(ctx, obj) + err = n.client.Create(ctx, obj) if err != nil { logger.Info("Couldn't create", "Error", err) return gpuv1.NotReady, err @@ -4350,7 +4350,7 @@ func SecurityContextConstraints(n ClusterPolicyController) (gpuv1.State, error) logger.Info("Found Resource, updating...") obj.ResourceVersion = found.ResourceVersion - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -4366,11 +4366,11 @@ func Service(n ClusterPolicyController) (gpuv1.State, error) { obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("Service", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("Service", obj.Name, "Namespace", obj.Namespace) // Check if state is disabled and cleanup resource if exists if !n.isStateEnabled(n.stateNames[n.idx]) { - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -4378,15 +4378,15 @@ func Service(n ClusterPolicyController) (gpuv1.State, error) { return gpuv1.Disabled, nil } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } found := 
&corev1.Service{} - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, found) + err := n.client.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("Not found, creating...") - err = n.rec.Client.Create(ctx, obj) + err = n.client.Create(ctx, obj) if err != nil { logger.Info("Couldn't create", "Error", err) return gpuv1.NotReady, err @@ -4400,7 +4400,7 @@ func Service(n ClusterPolicyController) (gpuv1.State, error) { obj.ResourceVersion = found.ResourceVersion obj.Spec.ClusterIP = found.Spec.ClusterIP - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -4410,7 +4410,7 @@ func Service(n ClusterPolicyController) (gpuv1.State, error) { func crdExists(n ClusterPolicyController, name string) (bool, error) { crd := &apiextensionsv1.CustomResourceDefinition{} - err := n.rec.Client.Get(n.ctx, client.ObjectKey{Name: name}, crd) + err := n.client.Get(n.ctx, client.ObjectKey{Name: name}, crd) if err != nil && apierrors.IsNotFound(err) { return false, nil } else if err != nil { @@ -4427,7 +4427,7 @@ func ServiceMonitor(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].ServiceMonitor.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("ServiceMonitor", obj.Name, "Namespace", obj.Namespace) + logger := n.logger.WithValues("ServiceMonitor", obj.Name, "Namespace", obj.Namespace) // Check if ServiceMonitor is a valid kind serviceMonitorCRDExists, err := crdExists(n, ServiceMonitorCRDName) @@ -4440,7 +4440,7 @@ func ServiceMonitor(n ClusterPolicyController) (gpuv1.State, error) { if !serviceMonitorCRDExists { return gpuv1.Ready, nil } - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -4455,7 +4455,7 @@ func ServiceMonitor(n ClusterPolicyController) (gpuv1.State, error) { if !serviceMonitorCRDExists { return gpuv1.Ready, nil } - err := n.rec.Client.Delete(ctx, obj) + err := n.client.Delete(ctx, obj) if err != nil && !apierrors.IsNotFound(err) { logger.Info("Couldn't delete", "Error", err) return gpuv1.NotReady, err @@ -4503,15 +4503,15 @@ func ServiceMonitor(n ClusterPolicyController) (gpuv1.State, error) { obj.Spec.NamespaceSelector.MatchNames[idx] = obj.Namespace } - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } found := &promv1.ServiceMonitor{} - err = n.rec.Client.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, found) + err = n.client.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("Not found, creating...") - err = n.rec.Client.Create(ctx, obj) + err = n.client.Create(ctx, obj) if err != nil { logger.Info("Couldn't create", "Error", err) return gpuv1.NotReady, err @@ -4524,7 +4524,7 @@ func ServiceMonitor(n ClusterPolicyController) (gpuv1.State, error) { logger.Info("Found Resource, updating...") obj.ResourceVersion = found.ResourceVersion - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ 
-4548,17 +4548,17 @@ func transformRuntimeClassLegacy(n ClusterPolicyController, spec nodev1.RuntimeC obj.Labels = spec.Labels - logger := n.rec.Log.WithValues("RuntimeClass", obj.Name) + logger := n.logger.WithValues("RuntimeClass", obj.Name) - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } found := &nodev1beta1.RuntimeClass{} - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: "", Name: obj.Name}, found) + err := n.client.Get(ctx, types.NamespacedName{Namespace: "", Name: obj.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("Not found, creating...") - err = n.rec.Client.Create(ctx, obj) + err = n.client.Create(ctx, obj) if err != nil { logger.Info("Couldn't create", "Error", err) return gpuv1.NotReady, err @@ -4571,7 +4571,7 @@ func transformRuntimeClassLegacy(n ClusterPolicyController, spec nodev1.RuntimeC logger.Info("Found Resource, updating...") obj.ResourceVersion = found.ResourceVersion - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -4595,17 +4595,17 @@ func transformRuntimeClass(n ClusterPolicyController, spec nodev1.RuntimeClass) obj.Labels = spec.Labels - logger := n.rec.Log.WithValues("RuntimeClass", obj.Name) + logger := n.logger.WithValues("RuntimeClass", obj.Name) - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } found := &nodev1.RuntimeClass{} - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: "", Name: obj.Name}, found) + err := n.client.Get(ctx, types.NamespacedName{Namespace: "", Name: obj.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("Not found, creating...") - err = n.rec.Client.Create(ctx, obj) + err = n.client.Create(ctx, obj) if err != nil { logger.Info("Couldn't create", "Error", err) return gpuv1.NotReady, err @@ -4618,7 +4618,7 @@ func transformRuntimeClass(n ClusterPolicyController, spec nodev1.RuntimeClass) logger.Info("Found Resource, updating...") obj.ResourceVersion = found.ResourceVersion - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -4634,20 +4634,20 @@ func transformKataRuntimeClasses(n ClusterPolicyController) (gpuv1.State, error) // Get all existing Kata RuntimeClasses opts := []client.ListOption{&client.MatchingLabels{"nvidia.com/kata-runtime-class": "true"}} list := &nodev1.RuntimeClassList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) 
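// Editor's note: kata RuntimeClasses are tracked through the
// nvidia.com/kata-runtime-class=true label queried above. When the Kata
// Manager is disabled, every object carrying that label is deleted; when it
// is enabled, the list is reconciled against KataManager.Config.RuntimeClasses
// and entries no longer specified there are removed.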
if err != nil { - n.rec.Log.Info("Could not get Kata RuntimeClassList", err) + n.logger.Info("Could not get Kata RuntimeClassList", err) return gpuv1.NotReady, fmt.Errorf("error getting kata RuntimeClassList: %v", err) } - n.rec.Log.V(1).Info("Kata RuntimeClasses", "Number", len(list.Items)) + n.logger.V(1).Info("Kata RuntimeClasses", "Number", len(list.Items)) if !config.KataManager.IsEnabled() { // Delete all Kata RuntimeClasses - n.rec.Log.Info("Kata Manager disabled, deleting all Kata RuntimeClasses") + n.logger.Info("Kata Manager disabled, deleting all Kata RuntimeClasses") for _, rc := range list.Items { rc := rc - n.rec.Log.V(1).Info("Deleting Kata RuntimeClass", "Name", rc.Name) - err := n.rec.Client.Delete(ctx, &rc) + n.logger.V(1).Info("Deleting Kata RuntimeClass", "Name", rc.Name) + err := n.client.Delete(ctx, &rc) if err != nil { return gpuv1.NotReady, fmt.Errorf("error deleting kata RuntimeClass '%s': %v", rc.Name, err) } @@ -4665,8 +4665,8 @@ func transformKataRuntimeClasses(n ClusterPolicyController) (gpuv1.State, error) for _, rc := range list.Items { if _, ok := rcNames[rc.Name]; !ok { rc := rc - n.rec.Log.Info("Deleting Kata RuntimeClass", "Name", rc.Name) - err := n.rec.Client.Delete(ctx, &rc) + n.logger.Info("Deleting Kata RuntimeClass", "Name", rc.Name) + err := n.client.Delete(ctx, &rc) if err != nil { return gpuv1.NotReady, fmt.Errorf("error deleting kata RuntimeClass '%s': %v", rc.Name, err) } @@ -4676,7 +4676,7 @@ func transformKataRuntimeClasses(n ClusterPolicyController) (gpuv1.State, error) // Using kata RuntimClass template, create / update RuntimeClass objects specified in KataManager configuration template := n.resources[state].RuntimeClasses[0] for _, rc := range config.KataManager.Config.RuntimeClasses { - logger := n.rec.Log.WithValues("RuntimeClass", rc.Name) + logger := n.logger.WithValues("RuntimeClass", rc.Name) if rc.Name == config.Operator.RuntimeClass { return gpuv1.NotReady, fmt.Errorf("error creating kata runtimeclass '%s' as it conflicts with the runtimeclass used for the gpu-operator operand pods itself", rc.Name) @@ -4699,15 +4699,15 @@ func transformKataRuntimeClasses(n ClusterPolicyController) (gpuv1.State, error) } obj.Scheduling.NodeSelector = nodeSelector - if err := controllerutil.SetControllerReference(n.singleton, &obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, &obj, n.scheme); err != nil { return gpuv1.NotReady, err } found := &nodev1.RuntimeClass{} - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: "", Name: obj.Name}, found) + err := n.client.Get(ctx, types.NamespacedName{Namespace: "", Name: obj.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("Not found, creating...") - err = n.rec.Client.Create(ctx, &obj) + err = n.client.Create(ctx, &obj) if err != nil { logger.Info("Couldn't create", "Error", err) return gpuv1.NotReady, err @@ -4720,7 +4720,7 @@ func transformKataRuntimeClasses(n ClusterPolicyController) (gpuv1.State, error) logger.Info("Found Resource, updating...") obj.ResourceVersion = found.ResourceVersion - err = n.rec.Client.Update(ctx, &obj) + err = n.client.Update(ctx, &obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err @@ -4748,9 +4748,9 @@ func RuntimeClasses(n ClusterPolicyController) (gpuv1.State, error) { // 'nvidia-legacy' runtime classes. Delete these objects if they were // previously created. 
if !n.singleton.Spec.CDI.IsEnabled() && (obj.Name == "nvidia-cdi" || obj.Name == "nvidia-legacy") { - err := n.rec.Client.Delete(n.ctx, &obj) + err := n.client.Delete(n.ctx, &obj) if err != nil && !apierrors.IsNotFound(err) { - n.rec.Log.Info("Couldn't delete", "RuntimeClass", obj.Name, "Error", err) + n.logger.Info("Couldn't delete", "RuntimeClass", obj.Name, "Error", err) return gpuv1.NotReady, err } continue @@ -4773,17 +4773,17 @@ func PrometheusRule(n ClusterPolicyController) (gpuv1.State, error) { obj := n.resources[state].PrometheusRule.DeepCopy() obj.Namespace = n.operatorNamespace - logger := n.rec.Log.WithValues("PrometheusRule", obj.Name) + logger := n.logger.WithValues("PrometheusRule", obj.Name) - if err := controllerutil.SetControllerReference(n.singleton, obj, n.rec.Scheme); err != nil { + if err := controllerutil.SetControllerReference(n.singleton, obj, n.scheme); err != nil { return gpuv1.NotReady, err } found := &promv1.PrometheusRule{} - err := n.rec.Client.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, found) + err := n.client.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, found) if err != nil && apierrors.IsNotFound(err) { logger.Info("Not found, creating...") - err = n.rec.Client.Create(ctx, obj) + err = n.client.Create(ctx, obj) if err != nil { logger.Info("Couldn't create", "Error", err) return gpuv1.NotReady, err @@ -4796,7 +4796,7 @@ func PrometheusRule(n ClusterPolicyController) (gpuv1.State, error) { logger.Info("Found Resource, updating...") obj.ResourceVersion = found.ResourceVersion - err = n.rec.Client.Update(ctx, obj) + err = n.client.Update(ctx, obj) if err != nil { logger.Info("Couldn't update", "Error", err) return gpuv1.NotReady, err diff --git a/controllers/object_controls_test.go b/controllers/object_controls_test.go index 44c8f4195..50823116d 100644 --- a/controllers/object_controls_test.go +++ b/controllers/object_controls_test.go @@ -67,7 +67,6 @@ type testConfig struct { var ( cfg *testConfig clusterPolicyController ClusterPolicyController - clusterPolicyReconciler ClusterPolicyReconciler clusterPolicy gpuv1.ClusterPolicy boolTrue *bool boolFalse *bool @@ -201,16 +200,12 @@ func setup() error { } ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) - clusterPolicyReconciler = ClusterPolicyReconciler{ - Client: client, - Log: ctrl.Log.WithName("controller").WithName("ClusterPolicy"), - Scheme: s, - } - clusterPolicyController = ClusterPolicyController{ ctx: ctx, singleton: cp, - rec: &clusterPolicyReconciler, + client: client, + logger: ctrl.Log.WithName("controller").WithName("ClusterPolicy"), + scheme: s, } clusterPolicyController.operatorMetrics = initOperatorMetrics(&clusterPolicyController) @@ -271,7 +266,7 @@ func newCluster(nodes int, s *runtime.Scheme) (client.Client, error) { // updateClusterPolicy updates an existing ClusterPolicy instance func updateClusterPolicy(n *ClusterPolicyController, cp *gpuv1.ClusterPolicy) error { n.singleton = cp - err := n.rec.Client.Update(n.ctx, cp) + err := n.client.Update(n.ctx, cp) if err != nil && !apierrors.IsConflict(err) { return fmt.Errorf("failed to update ClusterPolicy: %v", err) } @@ -285,7 +280,7 @@ func removeState(n *ClusterPolicyController, idx int) error { var err error for _, res := range kubernetesResources { // TODO: use n.operatorNamespace once MR is merged - err = n.rec.Client.DeleteAllOf(n.ctx, res) + err = n.client.DeleteAllOf(n.ctx, res) if err != nil { return fmt.Errorf("error deleting objects from k8s client: %v", err) } @@ -414,7 
+409,7 @@ func testDaemonsetCommon(t *testing.T, cp *gpuv1.ClusterPolicy, component string client.MatchingLabels{"app": dsLabel}, } list := &appsv1.DaemonSetList{} - err = clusterPolicyController.rec.Client.List(ctx, list, opts...) + err = clusterPolicyController.client.List(ctx, list, opts...) if err != nil { t.Fatalf("could not get DaemonSetList from client: %v", err) } diff --git a/controllers/resource_manager.go b/controllers/resource_manager.go index af4c1dbad..2789bfe3d 100644 --- a/controllers/resource_manager.go +++ b/controllers/resource_manager.go @@ -67,7 +67,7 @@ func filePathWalkDir(n *ClusterPolicyController, root string) ([]string, error) var files []string err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { if err != nil { - n.rec.Log.V(1).Info("error in filepath.Walk on %s: %v", root, err) + n.logger.V(1).Info("error in filepath.Walk on %s: %v", root, err) return nil } if !info.IsDir() { @@ -103,7 +103,7 @@ func addResourcesControls(n *ClusterPolicyController, path string) (Resources, c res := Resources{} ctrl := controlFunc{} - n.rec.Log.Info("Getting assets from: ", "path:", path) + n.logger.Info("Getting assets from: ", "path:", path) manifests := getAssetsFrom(n, path, n.openshift) s := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme.Scheme, @@ -115,7 +115,7 @@ func addResourcesControls(n *ClusterPolicyController, path string) (Resources, c slce := strings.Split(kind, ":") kind = strings.TrimSpace(slce[1]) - n.rec.Log.V(1).Info("Looking for ", "Kind", kind, "in path:", path) + n.logger.V(1).Info("Looking for ", "Kind", kind, "in path:", path) switch kind { case "ServiceAccount": @@ -181,7 +181,7 @@ func addResourcesControls(n *ClusterPolicyController, path string) (Resources, c panicIfError(err) ctrl = append(ctrl, PrometheusRule) default: - n.rec.Log.Info("Unknown Resource", "Manifest", m, "Kind", kind) + n.logger.Info("Unknown Resource", "Manifest", m, "Kind", kind) } } diff --git a/controllers/state_manager.go b/controllers/state_manager.go index 796694780..c8a1e9d2d 100644 --- a/controllers/state_manager.go +++ b/controllers/state_manager.go @@ -23,23 +23,19 @@ import ( "path/filepath" "strings" - secv1 "github.com/openshift/api/security/v1" - promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - - gpuv1 "github.com/NVIDIA/gpu-operator/api/v1" - "github.com/go-logr/logr" apiconfigv1 "github.com/openshift/api/config/v1" - apiimagev1 "github.com/openshift/api/image/v1" configv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" "golang.org/x/mod/semver" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/discovery" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" + + gpuv1 "github.com/NVIDIA/gpu-operator/api/v1" ) const ( @@ -145,15 +141,18 @@ type OpenShiftDriverToolkit struct { // ClusterPolicyController represents clusterpolicy controller spec for GPU operator type ClusterPolicyController struct { + client client.Client + ctx context.Context singleton *gpuv1.ClusterPolicy + logger logr.Logger + scheme *runtime.Scheme operatorNamespace string resources []Resources controls []controlFunc stateNames []string operatorMetrics *OperatorMetrics - rec *ClusterPolicyReconciler idx int kernelVersionMap map[string]string currentKernelVersion string @@ -425,7 
+424,7 @@ func (n *ClusterPolicyController) applyDriverAutoUpgradeAnnotation() error { // fetch all nodes opts := []client.ListOption{} list := &corev1.NodeList{} - err := n.rec.Client.List(n.ctx, list, opts...) + err := n.client.List(n.ctx, list, opts...) if err != nil { return fmt.Errorf("Unable to list nodes to check annotations, err %s", err.Error()) } @@ -465,9 +464,9 @@ func (n *ClusterPolicyController) applyDriverAutoUpgradeAnnotation() error { // remove annotation if value is null delete(node.ObjectMeta.Annotations, driverAutoUpgradeAnnotationKey) } - err := n.rec.Client.Update(n.ctx, &node) + err := n.client.Update(n.ctx, &node) if err != nil { - n.rec.Log.Info("Failed to update node state annotation on a node", + n.logger.Info("Failed to update node state annotation on a node", "node", node.Name, "annotationKey", driverAutoUpgradeAnnotationKey, "annotationValue", value, "error", err) @@ -484,7 +483,7 @@ func (n *ClusterPolicyController) labelGPUNodes() (bool, int, error) { // fetch all nodes opts := []client.ListOption{} list := &corev1.NodeList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) if err != nil { return false, 0, fmt.Errorf("Unable to list nodes to check labels, err %s", err.Error()) } @@ -501,16 +500,16 @@ func (n *ClusterPolicyController) labelGPUNodes() (bool, int, error) { } config, err := getWorkloadConfig(labels, n.sandboxEnabled) if err != nil { - n.rec.Log.Info("WARNING: failed to get GPU workload config for node; using default", + n.logger.Info("WARNING: failed to get GPU workload config for node; using default", "NodeName", node.ObjectMeta.Name, "SandboxEnabled", n.sandboxEnabled, "Error", err, "defaultGPUWorkloadConfig", defaultGPUWorkloadConfig) } - n.rec.Log.Info("GPU workload configuration", "NodeName", node.ObjectMeta.Name, "GpuWorkloadConfig", config) - gpuWorkloadConfig := &gpuWorkloadConfiguration{config, node.ObjectMeta.Name, n.rec.Log} + n.logger.Info("GPU workload configuration", "NodeName", node.ObjectMeta.Name, "GpuWorkloadConfig", config) + gpuWorkloadConfig := &gpuWorkloadConfiguration{config, node.ObjectMeta.Name, n.logger} if !hasCommonGPULabel(labels) && hasGPULabels(labels) { - n.rec.Log.Info("Node has GPU(s)", "NodeName", node.ObjectMeta.Name) + n.logger.Info("Node has GPU(s)", "NodeName", node.ObjectMeta.Name) // label the node with common Nvidia GPU label - n.rec.Log.Info("Setting node label", "NodeName", node.ObjectMeta.Name, "Label", commonGPULabelKey, "Value", commonGPULabelValue) + n.logger.Info("Setting node label", "NodeName", node.ObjectMeta.Name, "Label", commonGPULabelKey, "Value", commonGPULabelValue) labels[commonGPULabelKey] = commonGPULabelValue // update node labels node.SetLabels(labels) @@ -518,10 +517,10 @@ func (n *ClusterPolicyController) labelGPUNodes() (bool, int, error) { } else if hasCommonGPULabel(labels) && !hasGPULabels(labels) { // previously labelled node and no longer has GPU's // label node to reset common Nvidia GPU label - n.rec.Log.Info("Node no longer has GPUs", "NodeName", node.ObjectMeta.Name) - n.rec.Log.Info("Setting node label", "Label", commonGPULabelKey, "Value", "false") + n.logger.Info("Node no longer has GPUs", "NodeName", node.ObjectMeta.Name) + n.logger.Info("Setting node label", "Label", commonGPULabelKey, "Value", "false") labels[commonGPULabelKey] = "false" - n.rec.Log.Info("Disabling all operands for node", "NodeName", node.ObjectMeta.Name) + n.logger.Info("Disabling all operands for node", "NodeName", node.ObjectMeta.Name) 
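// Editor's note: removeAllGPUStateLabels below clears the per-operand state
// labels so no operand gets scheduled onto a node that has lost its GPUs; the
// common GPU label itself is reset to "false" above rather than removed.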
removeAllGPUStateLabels(labels) // update node labels node.SetLabels(labels) @@ -530,16 +529,16 @@ func (n *ClusterPolicyController) labelGPUNodes() (bool, int, error) { if hasCommonGPULabel(labels) { // If node has GPU, then add state labels as per the workload type - n.rec.Log.Info("Checking GPU state labels on the node", "NodeName", node.ObjectMeta.Name) + n.logger.Info("Checking GPU state labels on the node", "NodeName", node.ObjectMeta.Name) if gpuWorkloadConfig.updateGPUStateLabels(labels) { - n.rec.Log.Info("Applying correct GPU state labels to the node", "NodeName", node.ObjectMeta.Name) + n.logger.Info("Applying correct GPU state labels to the node", "NodeName", node.ObjectMeta.Name) node.SetLabels(labels) updateLabels = true } // Disable MIG on the node explicitly where no MIG config is specified if n.singleton.Spec.MIGManager.IsEnabled() && hasMIGCapableGPU(labels) && !hasMIGConfigLabel(labels) { if n.singleton.Spec.MIGManager.Config != nil && n.singleton.Spec.MIGManager.Config.Default == migConfigDisabledValue { - n.rec.Log.Info("Setting MIG config label", "NodeName", node.ObjectMeta.Name, "Label", migConfigLabelKey, "Value", migConfigDisabledValue) + n.logger.Info("Setting MIG config label", "NodeName", node.ObjectMeta.Name, "Label", migConfigLabelKey, "Value", migConfigDisabledValue) labels[migConfigLabelKey] = migConfigDisabledValue node.SetLabels(labels) updateLabels = true @@ -553,12 +552,12 @@ func (n *ClusterPolicyController) labelGPUNodes() (bool, int, error) { rhcosVersion, ok := labels[nfdOSTreeVersionLabelKey] if ok { n.ocpDriverToolkit.rhcosVersions[rhcosVersion] = true - n.rec.Log.V(1).Info("GPU node running RHCOS", + n.logger.V(1).Info("GPU node running RHCOS", "nodeName", node.ObjectMeta.Name, "RHCOS version", rhcosVersion, ) } else { - n.rec.Log.Info("node doesn't have the proper NFD RHCOS version label.", + n.logger.Info("node doesn't have the proper NFD RHCOS version label.", "nodeName", node.ObjectMeta.Name, "nfdLabel", nfdOSTreeVersionLabelKey, ) @@ -568,7 +567,7 @@ func (n *ClusterPolicyController) labelGPUNodes() (bool, int, error) { // update node with the latest labels if updateLabels { - err = n.rec.Client.Update(ctx, &node) + err = n.client.Update(ctx, &node) if err != nil { return false, 0, fmt.Errorf("Unable to label node %s for the GPU Operator deployment, err %s", node.ObjectMeta.Name, err.Error()) @@ -576,7 +575,7 @@ func (n *ClusterPolicyController) labelGPUNodes() (bool, int, error) { } } // end node loop - n.rec.Log.Info("Number of nodes with GPU label", "NodeCount", gpuNodesTotal) + n.logger.Info("Number of nodes with GPU label", "NodeCount", gpuNodesTotal) n.operatorMetrics.gpuNodesTotal.Set(float64(gpuNodesTotal)) return clusterHasNFDLabels, gpuNodesTotal, nil } @@ -606,7 +605,7 @@ func (n *ClusterPolicyController) setPodSecurityLabelsForNamespace() error { // The GPU Operator is not installed in the suggested // namespace, so the namespace may be shared with other // untrusted operators. Do not set Pod Security Admission labels. - n.rec.Log.Info("GPU Operator is not installed in the suggested namespace. Not setting Pod Security Admission labels for namespace", + n.logger.Info("GPU Operator is not installed in the suggested namespace. 
Not setting Pod Security Admission labels for namespace", "namespace", namespaceName, "suggested namespace", ocpSuggestedNamespace) return nil @@ -614,7 +613,7 @@ func (n *ClusterPolicyController) setPodSecurityLabelsForNamespace() error { ns := &corev1.Namespace{} opts := client.ObjectKey{Name: namespaceName} - err := n.rec.Client.Get(ctx, opts, ns) + err := n.client.Get(ctx, opts, ns) if err != nil { return fmt.Errorf("ERROR: could not get Namespace %s from client: %v", namespaceName, err) } @@ -640,7 +639,7 @@ func (n *ClusterPolicyController) setPodSecurityLabelsForNamespace() error { return nil } - err = n.rec.Client.Patch(ctx, ns, patch) + err = n.client.Patch(ctx, ns, patch) if err != nil { return fmt.Errorf("unable to label namespace %s with pod security levels: %v", namespaceName, err) } @@ -657,7 +656,7 @@ func (n *ClusterPolicyController) ocpEnsureNamespaceMonitoring() error { // namespace, so the namespace may be shared with other // untrusted operators. Do not enable namespace monitoring in // this case, as per OpenShift/Prometheus best practices. - n.rec.Log.Info("GPU Operator not installed in the suggested namespace, skipping namespace monitoring verification", + n.logger.Info("GPU Operator not installed in the suggested namespace, skipping namespace monitoring verification", "namespace", namespaceName, "suggested namespace", ocpSuggestedNamespace) return nil @@ -665,7 +664,7 @@ func (n *ClusterPolicyController) ocpEnsureNamespaceMonitoring() error { ns := &corev1.Namespace{} opts := client.ObjectKey{Name: namespaceName} - err := n.rec.Client.Get(ctx, opts, ns) + err := n.client.Get(ctx, opts, ns) if err != nil { return fmt.Errorf("ERROR: could not get Namespace %s from client: %v", namespaceName, err) } @@ -679,7 +678,7 @@ func (n *ClusterPolicyController) ocpEnsureNamespaceMonitoring() error { } else { msg = "WARNING: OpenShift monitoring currently disabled on user request" } - n.rec.Log.Info(msg, + n.logger.Info(msg, "namespace", namespaceName, "label", ocpNamespaceMonitoringLabelKey, "value", val, @@ -689,16 +688,16 @@ func (n *ClusterPolicyController) ocpEnsureNamespaceMonitoring() error { } // label not defined, enable monitoring - n.rec.Log.Info("Enabling OpenShift monitoring") - n.rec.Log.V(1).Info("Adding monitoring label to the operator namespace", + n.logger.Info("Enabling OpenShift monitoring") + n.logger.V(1).Info("Adding monitoring label to the operator namespace", "namespace", namespaceName, "label", ocpNamespaceMonitoringLabelKey, "value", ocpNamespaceMonitoringLabelValue) - n.rec.Log.Info("Monitoring can be disabled by setting the namespace label " + + n.logger.Info("Monitoring can be disabled by setting the namespace label " + ocpNamespaceMonitoringLabelKey + "=false") patch := client.MergeFrom(ns.DeepCopy()) ns.ObjectMeta.Labels[ocpNamespaceMonitoringLabelKey] = ocpNamespaceMonitoringLabelValue - err = n.rec.Client.Patch(ctx, ns, patch) + err = n.client.Patch(ctx, ns, patch) if err != nil { return fmt.Errorf("Unable to label namespace %s for the GPU Operator monitoring, err %s", namespaceName, err.Error()) @@ -724,7 +723,7 @@ func (n *ClusterPolicyController) getRuntime() error { client.MatchingLabels{commonGPULabelKey: "true"}, } list := &corev1.NodeList{} - err := n.rec.Client.List(ctx, list, opts...) + err := n.client.List(ctx, list, opts...) 
if err != nil { return fmt.Errorf("Unable to list nodes prior to checking container runtime: %v", err) } @@ -733,7 +732,7 @@ func (n *ClusterPolicyController) getRuntime() error { for _, node := range list.Items { rt, err := getRuntimeString(node) if err != nil { - n.rec.Log.Info(fmt.Sprintf("Unable to get runtime info for node %s: %v", node.Name, err)) + n.logger.Info(fmt.Sprintf("Unable to get runtime info for node %s: %v", node.Name, err)) continue } runtime = rt @@ -744,7 +743,7 @@ func (n *ClusterPolicyController) getRuntime() error { } if runtime.String() == "" { - n.rec.Log.Info("Unable to get runtime info from the cluster, defaulting to containerd") + n.logger.Info("Unable to get runtime info from the cluster, defaulting to containerd") runtime = gpuv1.Containerd } n.runtime = runtime @@ -754,14 +753,16 @@ func (n *ClusterPolicyController) getRuntime() error { func (n *ClusterPolicyController) init(ctx context.Context, reconciler *ClusterPolicyReconciler, clusterPolicy *gpuv1.ClusterPolicy) error { n.singleton = clusterPolicy n.ctx = ctx - n.rec = reconciler n.idx = 0 + n.logger = reconciler.Log + n.client = reconciler.Client + n.scheme = reconciler.Scheme if len(n.controls) == 0 { clusterPolicyCtrl.operatorNamespace = os.Getenv("OPERATOR_NAMESPACE") if clusterPolicyCtrl.operatorNamespace == "" { - n.rec.Log.Error(nil, "OPERATOR_NAMESPACE environment variable not set, cannot proceed") + n.logger.Error(nil, "OPERATOR_NAMESPACE environment variable not set, cannot proceed") // we cannot do anything without the operator namespace, // let the operator Pod run into `CrashloopBackOff` @@ -782,15 +783,10 @@ func (n *ClusterPolicyController) init(ctx context.Context, reconciler *ClusterP return fmt.Errorf("k8s version detected '%s' is not a valid semantic version", k8sVersion) } n.k8sVersion = k8sVersion - n.rec.Log.Info("Kubernetes version detected", "version", k8sVersion) - - utilruntime.Must(promv1.AddToScheme(reconciler.Scheme)) - utilruntime.Must(secv1.Install(reconciler.Scheme)) - utilruntime.Must(apiconfigv1.Install(reconciler.Scheme)) - utilruntime.Must(apiimagev1.Install(reconciler.Scheme)) + n.logger.Info("Kubernetes version detected", "version", k8sVersion) n.operatorMetrics = initOperatorMetrics(n) - n.rec.Log.Info("Operator metrics initialized.") + n.logger.Info("Operator metrics initialized.") addState(n, "/opt/gpu-operator/pre-requisites") addState(n, "/opt/gpu-operator/state-operator-metrics") @@ -821,13 +817,13 @@ func (n *ClusterPolicyController) init(ctx context.Context, reconciler *ClusterP // workload configuration defaultWorkload := clusterPolicy.Spec.SandboxWorkloads.DefaultWorkload if isValidWorkloadConfig(defaultWorkload) { - n.rec.Log.Info("Default GPU workload is overridden in ClusterPolicy", "DefaultWorkload", defaultWorkload) + n.logger.Info("Default GPU workload is overridden in ClusterPolicy", "DefaultWorkload", defaultWorkload) defaultGPUWorkloadConfig = defaultWorkload } } else { n.sandboxEnabled = false } - n.rec.Log.Info("Sandbox workloads", "Enabled", n.sandboxEnabled, "DefaultWorkload", defaultGPUWorkloadConfig) + n.logger.Info("Sandbox workloads", "Enabled", n.sandboxEnabled, "DefaultWorkload", defaultGPUWorkloadConfig) if n.openshift != "" && (n.singleton.Spec.Operator.UseOpenShiftDriverToolkit == nil || *n.singleton.Spec.Operator.UseOpenShiftDriverToolkit) { @@ -849,12 +845,12 @@ func (n *ClusterPolicyController) init(ctx context.Context, reconciler *ClusterP if clusterPolicy.Spec.PSA.IsEnabled() { // label namespace with Pod Security Admission 
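
The getRuntime changes above keep the same detection flow: list the GPU-labeled nodes, take the first node whose runtime string parses, and fall back to containerd when nothing parses. A compact approximation of that loop, assuming getRuntimeString (not shown in this diff) derives the runtime name from node.Status.NodeInfo.ContainerRuntimeVersion:

package controllers

import (
	"strings"

	corev1 "k8s.io/api/core/v1"
)

// detectRuntime returns the first parseable container runtime name,
// e.g. "containerd://1.7.2" -> "containerd", defaulting to containerd
// when no node reports a usable runtime string.
func detectRuntime(nodes []corev1.Node) string {
	for _, node := range nodes {
		rtv := node.Status.NodeInfo.ContainerRuntimeVersion
		if rt, _, found := strings.Cut(rtv, "://"); found && rt != "" {
			return rt
		}
	}
	return "containerd"
}
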
levels - n.rec.Log.Info("Pod Security is enabled. Adding labels to GPU Operator namespace", "namespace", n.operatorNamespace) + n.logger.Info("Pod Security is enabled. Adding labels to GPU Operator namespace", "namespace", n.operatorNamespace) err := n.setPodSecurityLabelsForNamespace() if err != nil { return err } - n.rec.Log.Info("Pod Security Admission labels added to GPU Operator namespace", "namespace", n.operatorNamespace) + n.logger.Info("Pod Security Admission labels added to GPU Operator namespace", "namespace", n.operatorNamespace) } // fetch all nodes and label gpu nodes @@ -876,13 +872,13 @@ func (n *ClusterPolicyController) init(ctx context.Context, reconciler *ClusterP if err != nil { return err } - n.rec.Log.Info(fmt.Sprintf("Using container runtime: %s", n.runtime.String())) + n.logger.Info(fmt.Sprintf("Using container runtime: %s", n.runtime.String())) // fetch all kernel versions from the GPU nodes in the cluster if n.singleton.Spec.Driver.IsEnabled() && n.singleton.Spec.Driver.UsePrecompiledDrivers() { kernelVersionMap, err := n.getKernelVersionsMap() if err != nil { - n.rec.Log.Info("Unable to obtain all kernel versions of the GPU nodes in the cluster", "err", err) + n.logger.Info("Unable to obtain all kernel versions of the GPU nodes in the cluster", "err", err) return err } n.kernelVersionMap = kernelVersionMap @@ -906,7 +902,7 @@ func (n *ClusterPolicyController) initOCPParams() error { } else if n.ocpDriverToolkit.requested { hasImageStream, err := ocpHasDriverToolkitImageStream(n) if err != nil { - n.rec.Log.Info("ocpHasDriverToolkitImageStream", "err", err) + n.logger.Info("ocpHasDriverToolkitImageStream", "err", err) return err } hasCompatibleNFD := len(n.ocpDriverToolkit.rhcosVersions) != 0 @@ -917,11 +913,11 @@ func (n *ClusterPolicyController) initOCPParams() error { } else { n.operatorMetrics.openshiftDriverToolkitEnabled.Set(openshiftDriverToolkitNotPossible) } - n.rec.Log.Info("OpenShift Driver Toolkit requested", + n.logger.Info("OpenShift Driver Toolkit requested", "hasCompatibleNFD", hasCompatibleNFD, "hasDriverToolkitImageStream", hasImageStream) - n.rec.Log.Info("OpenShift Driver Toolkit", + n.logger.Info("OpenShift Driver Toolkit", "enabled", n.ocpDriverToolkit.enabled) if hasImageStream { @@ -954,7 +950,7 @@ func (n *ClusterPolicyController) step() (gpuv1.State, error) { // updating / deleting objects owned by another controller. if (n.stateNames[n.idx] == "state-driver" || n.stateNames[n.idx] == "state-vgpu-manager") && n.singleton.Spec.Driver.UseNvdiaDriverCRDType() { - n.rec.Log.Info("NVIDIADriver CRD is enabled, cleaning up all NVIDIA driver daemonsets owned by ClusterPolicy") + n.logger.Info("NVIDIADriver CRD is enabled, cleaning up all NVIDIA driver daemonsets owned by ClusterPolicy") n.idx++ // Cleanup all driver daemonsets owned by ClusterPolicy. 
err := n.cleanupAllDriverDaemonSets(n.ctx) @@ -1032,7 +1028,7 @@ func (n ClusterPolicyController) isStateEnabled(stateName string) bool { case "state-operator-metrics": return true default: - n.rec.Log.Error(nil, "invalid state passed", "stateName", stateName) + n.logger.Error(nil, "invalid state passed", "stateName", stateName) return false } } From ba56fd77551149d6420fd72d63196397ee23e34e Mon Sep 17 00:00:00 2001 From: Tariq Ibrahim Date: Mon, 8 Apr 2024 09:01:21 -0700 Subject: [PATCH 012/293] bump cuda base image, golang and golangci-lint versions Signed-off-by: Tariq Ibrahim --- .../gpu-operator-certified.clusterserviceversion.yaml | 8 ++++---- deployments/gpu-operator/values.yaml | 4 ++-- docker/Dockerfile.devel | 2 +- validator/versions.mk | 2 +- versions.mk | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml index 5b142266b..6fe53d14d 100644 --- a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml +++ b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml @@ -221,13 +221,13 @@ spec: - name: mig-manager-image image: nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:a67d8e92861a2dce5649105c07561e4422e9fe4ba81a6525dc0d70a7ef85f9c0 - name: init-container-image - image: nvcr.io/nvidia/cuda@sha256:714547d54e5fe4191019a1e5f1daffc7fab7481b619b79c378541dafc76c9e5d + image: nvcr.io/nvidia/cuda@sha256:413a8a8ab23d0503befb1ca3bd4323ff3b44564114bec3d49ab150cd2b7265b0 - name: gpu-operator-validator-image image: registry.gitlab.com/nvidia/kubernetes/gpu-operator/staging/gpu-operator-validator:master-latest-ubi8 - name: k8s-driver-manager-image image: nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:e349905fb91d8ebb3c6487e739ef004a55fd8ec7ba5c5c910b25c5a700f91f53 - name: vfio-manager-image - image: nvcr.io/nvidia/cuda@sha256:714547d54e5fe4191019a1e5f1daffc7fab7481b619b79c378541dafc76c9e5d + image: nvcr.io/nvidia/cuda@sha256:413a8a8ab23d0503befb1ca3bd4323ff3b44564114bec3d49ab150cd2b7265b0 - name: sandbox-device-plugin-image image: nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:3dca406c56cb567f94cf9a7582b2dbdd67bdd7c3c4475fe61f5d886d834929f3 - name: vgpu-device-manager-image @@ -913,9 +913,9 @@ spec: - name: "MIG_MANAGER_IMAGE" value: "nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:a67d8e92861a2dce5649105c07561e4422e9fe4ba81a6525dc0d70a7ef85f9c0" - name: "CUDA_BASE_IMAGE" - value: "nvcr.io/nvidia/cuda@sha256:714547d54e5fe4191019a1e5f1daffc7fab7481b619b79c378541dafc76c9e5d" + value: "nvcr.io/nvidia/cuda@sha256:413a8a8ab23d0503befb1ca3bd4323ff3b44564114bec3d49ab150cd2b7265b0" - name: "VFIO_MANAGER_IMAGE" - value: "nvcr.io/nvidia/cuda@sha256:714547d54e5fe4191019a1e5f1daffc7fab7481b619b79c378541dafc76c9e5d" + value: "nvcr.io/nvidia/cuda@sha256:413a8a8ab23d0503befb1ca3bd4323ff3b44564114bec3d49ab150cd2b7265b0" - name: "SANDBOX_DEVICE_PLUGIN_IMAGE" value: "nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:3dca406c56cb567f94cf9a7582b2dbdd67bdd7c3c4475fe61f5d886d834929f3" - name: "VGPU_DEVICE_MANAGER_IMAGE" diff --git a/deployments/gpu-operator/values.yaml b/deployments/gpu-operator/values.yaml index c08c630ca..13157a454 100644 --- a/deployments/gpu-operator/values.yaml +++ b/deployments/gpu-operator/values.yaml @@ -71,7 +71,7 @@ operator: initContainer: image: cuda repository: nvcr.io/nvidia - version: 12.3.2-base-ubi8 + version: 12.4.0-base-ubi8 imagePullPolicy: IfNotPresent tolerations: - key: 
"node-role.kubernetes.io/master" @@ -421,7 +421,7 @@ vfioManager: enabled: true repository: nvcr.io/nvidia image: cuda - version: 12.3.2-base-ubi8 + version: 12.4.0-base-ubi8 imagePullPolicy: IfNotPresent imagePullSecrets: [] env: [] diff --git a/docker/Dockerfile.devel b/docker/Dockerfile.devel index 5deb5be52..6b4363c1c 100644 --- a/docker/Dockerfile.devel +++ b/docker/Dockerfile.devel @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. ARG GOLANG_VERSION=x.x.x -ARG GOLANGCI_LINT_VERSION=1.55.2 +ARG GOLANGCI_LINT_VERSION=1.57.2 FROM golang:${GOLANG_VERSION} diff --git a/validator/versions.mk b/validator/versions.mk index c8294d44b..1632c6392 100644 --- a/validator/versions.mk +++ b/validator/versions.mk @@ -16,4 +16,4 @@ include $(CURDIR)/../versions.mk CUDA_SAMPLES_VERSION ?= 11.7.1 -GOLANG_VERSION ?= 1.21.1 +GOLANG_VERSION ?= 1.22.2 diff --git a/versions.mk b/versions.mk index 57d28f9ea..36a6bbb35 100644 --- a/versions.mk +++ b/versions.mk @@ -19,7 +19,7 @@ # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) VERSION ?= v23.9.2 -CUDA_VERSION ?= 12.3.2 -GOLANG_VERSION ?= 1.21.5 +CUDA_VERSION ?= 12.4.0 +GOLANG_VERSION ?= 1.22.2 GIT_COMMIT ?= $(shell git describe --match="" --dirty --long --always 2> /dev/null || echo "") From f66b45f5a91236b9525f31fe706918ba49ce2c73 Mon Sep 17 00:00:00 2001 From: Tariq Ibrahim Date: Fri, 5 Apr 2024 09:38:21 -0700 Subject: [PATCH 013/293] bump NFD to v0.15.4 Signed-off-by: Tariq Ibrahim --- deployments/gpu-operator/Chart.lock | 6 +++--- deployments/gpu-operator/Chart.yaml | 2 +- .../gpu-operator/charts/node-feature-discovery/Chart.yaml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/deployments/gpu-operator/Chart.lock b/deployments/gpu-operator/Chart.lock index 49c9dca17..497649e71 100644 --- a/deployments/gpu-operator/Chart.lock +++ b/deployments/gpu-operator/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: node-feature-discovery repository: https://kubernetes-sigs.github.io/node-feature-discovery/charts - version: 0.15.3 -digest: sha256:541f77527d949998bca4bc887e2283f2fb41892f8f61635d8afab03b7fa9f134 -generated: "2024-03-15T16:27:37.327877+01:00" + version: 0.15.4 +digest: sha256:025d011a09907e4b274e550b36689c47ed12722543624debd1e7d709d70462d8 +generated: "2024-04-05T09:13:06.247476-07:00" diff --git a/deployments/gpu-operator/Chart.yaml b/deployments/gpu-operator/Chart.yaml index 1fd4d83ff..a6083cdbe 100644 --- a/deployments/gpu-operator/Chart.yaml +++ b/deployments/gpu-operator/Chart.yaml @@ -19,6 +19,6 @@ keywords: dependencies: - name: node-feature-discovery - version: v0.15.3 + version: v0.15.4 repository: https://kubernetes-sigs.github.io/node-feature-discovery/charts condition: nfd.enabled diff --git a/deployments/gpu-operator/charts/node-feature-discovery/Chart.yaml b/deployments/gpu-operator/charts/node-feature-discovery/Chart.yaml index 762f8c8f1..a57e62eda 100644 --- a/deployments/gpu-operator/charts/node-feature-discovery/Chart.yaml +++ b/deployments/gpu-operator/charts/node-feature-discovery/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: v0.15.3 +appVersion: v0.15.4 description: 'Detects hardware features available on each node in a Kubernetes cluster, and advertises those features using node labels. 
' home: https://github.com/kubernetes-sigs/node-feature-discovery @@ -11,4 +11,4 @@ name: node-feature-discovery sources: - https://github.com/kubernetes-sigs/node-feature-discovery type: application -version: 0.15.3 +version: 0.15.4 From 2b28883530442099aec6fa11a7775200baae6b49 Mon Sep 17 00:00:00 2001 From: Tariq Ibrahim Date: Thu, 11 Apr 2024 14:37:12 -0700 Subject: [PATCH 014/293] bump cuda base image to 12.4.1 --- .../gpu-operator-certified.clusterserviceversion.yaml | 8 ++++---- deployments/gpu-operator/values.yaml | 4 ++-- versions.mk | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml index 6fe53d14d..3e4e8284d 100644 --- a/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml +++ b/bundle/manifests/gpu-operator-certified.clusterserviceversion.yaml @@ -221,13 +221,13 @@ spec: - name: mig-manager-image image: nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:a67d8e92861a2dce5649105c07561e4422e9fe4ba81a6525dc0d70a7ef85f9c0 - name: init-container-image - image: nvcr.io/nvidia/cuda@sha256:413a8a8ab23d0503befb1ca3bd4323ff3b44564114bec3d49ab150cd2b7265b0 + image: nvcr.io/nvidia/cuda@sha256:ae0623ec8634b6c88f815b88037763def160cbbac15013b77ddef257fc276c9a - name: gpu-operator-validator-image image: registry.gitlab.com/nvidia/kubernetes/gpu-operator/staging/gpu-operator-validator:master-latest-ubi8 - name: k8s-driver-manager-image image: nvcr.io/nvidia/cloud-native/k8s-driver-manager@sha256:e349905fb91d8ebb3c6487e739ef004a55fd8ec7ba5c5c910b25c5a700f91f53 - name: vfio-manager-image - image: nvcr.io/nvidia/cuda@sha256:413a8a8ab23d0503befb1ca3bd4323ff3b44564114bec3d49ab150cd2b7265b0 + image: nvcr.io/nvidia/cuda@sha256:ae0623ec8634b6c88f815b88037763def160cbbac15013b77ddef257fc276c9a - name: sandbox-device-plugin-image image: nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:3dca406c56cb567f94cf9a7582b2dbdd67bdd7c3c4475fe61f5d886d834929f3 - name: vgpu-device-manager-image @@ -913,9 +913,9 @@ spec: - name: "MIG_MANAGER_IMAGE" value: "nvcr.io/nvidia/cloud-native/k8s-mig-manager@sha256:a67d8e92861a2dce5649105c07561e4422e9fe4ba81a6525dc0d70a7ef85f9c0" - name: "CUDA_BASE_IMAGE" - value: "nvcr.io/nvidia/cuda@sha256:413a8a8ab23d0503befb1ca3bd4323ff3b44564114bec3d49ab150cd2b7265b0" + value: "nvcr.io/nvidia/cuda@sha256:ae0623ec8634b6c88f815b88037763def160cbbac15013b77ddef257fc276c9a" - name: "VFIO_MANAGER_IMAGE" - value: "nvcr.io/nvidia/cuda@sha256:413a8a8ab23d0503befb1ca3bd4323ff3b44564114bec3d49ab150cd2b7265b0" + value: "nvcr.io/nvidia/cuda@sha256:ae0623ec8634b6c88f815b88037763def160cbbac15013b77ddef257fc276c9a" - name: "SANDBOX_DEVICE_PLUGIN_IMAGE" value: "nvcr.io/nvidia/kubevirt-gpu-device-plugin@sha256:3dca406c56cb567f94cf9a7582b2dbdd67bdd7c3c4475fe61f5d886d834929f3" - name: "VGPU_DEVICE_MANAGER_IMAGE" diff --git a/deployments/gpu-operator/values.yaml b/deployments/gpu-operator/values.yaml index 13157a454..0eb0d0834 100644 --- a/deployments/gpu-operator/values.yaml +++ b/deployments/gpu-operator/values.yaml @@ -71,7 +71,7 @@ operator: initContainer: image: cuda repository: nvcr.io/nvidia - version: 12.4.0-base-ubi8 + version: 12.4.1-base-ubi8 imagePullPolicy: IfNotPresent tolerations: - key: "node-role.kubernetes.io/master" @@ -421,7 +421,7 @@ vfioManager: enabled: true repository: nvcr.io/nvidia image: cuda - version: 12.4.0-base-ubi8 + version: 12.4.1-base-ubi8 imagePullPolicy: IfNotPresent imagePullSecrets: [] env: [] diff --git 
a/versions.mk b/versions.mk index 36a6bbb35..eb869b9b4 100644 --- a/versions.mk +++ b/versions.mk @@ -19,7 +19,7 @@ # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) VERSION ?= v23.9.2 -CUDA_VERSION ?= 12.4.0 +CUDA_VERSION ?= 12.4.1 GOLANG_VERSION ?= 1.22.2 GIT_COMMIT ?= $(shell git describe --match="" --dirty --long --always 2> /dev/null || echo "") From 3cabcda89042d7adefb8708f2b64ddc72b144721 Mon Sep 17 00:00:00 2001 From: Tariq Ibrahim Date: Fri, 12 Apr 2024 10:45:45 -0700 Subject: [PATCH 015/293] update various golang dependencies Signed-off-by: Tariq Ibrahim --- .common-ci.yml | 2 +- go.mod | 54 +- go.sum | 110 +- .../github.com/golang/protobuf/ptypes/any.go | 7 +- .../golang_protobuf_extensions/v2/LICENSE | 201 --- .../golang_protobuf_extensions/v2/NOTICE | 1 - .../v2/pbutil/.gitignore | 1 - .../v2/pbutil/Makefile | 7 - .../v2/pbutil/decode.go | 81 -- .../v2/pbutil/doc.go | 16 - .../v2/pbutil/encode.go | 49 - .../mittwald/go-helm-client/client.go | 2 + .../mittwald/go-helm-client/types.go | 2 + vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 53 + vendor/github.com/onsi/ginkgo/v2/core_dsl.go | 17 +- .../ginkgo/v2/ginkgo/internal/gocovmerge.go | 129 ++ .../ginkgo/internal/profiles_and_reports.go | 44 +- .../onsi/ginkgo/v2/ginkgo/outline/ginkgo.go | 3 +- .../onsi/ginkgo/v2/ginkgo/outline/import.go | 9 +- .../github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go | 5 + .../onsi/ginkgo/v2/internal/node.go | 33 +- .../onsi/ginkgo/v2/internal/spec_context.go | 4 +- .../onsi/ginkgo/v2/internal/suite.go | 20 +- .../ginkgo/v2/reporters/default_reporter.go | 47 +- .../onsi/ginkgo/v2/reporters/junit_report.go | 12 + .../onsi/ginkgo/v2/reporting_dsl.go | 59 +- .../github.com/onsi/ginkgo/v2/types/config.go | 5 +- .../github.com/onsi/ginkgo/v2/types/flags.go | 15 +- .../onsi/ginkgo/v2/types/version.go | 2 +- vendor/github.com/onsi/gomega/CHANGELOG.md | 38 + vendor/github.com/onsi/gomega/gomega_dsl.go | 2 +- .../onsi/gomega/internal/async_assertion.go | 7 +- vendor/github.com/onsi/gomega/matchers.go | 2 +- .../matchers/be_comparable_to_matcher.go | 4 +- .../prometheus/common/expfmt/decode.go | 27 +- .../prometheus/common/expfmt/encode.go | 78 +- .../prometheus/common/expfmt/expfmt.go | 144 +- .../common/expfmt/openmetrics_create.go | 85 +- .../prometheus/common/expfmt/text_create.go | 118 +- .../prometheus/common/expfmt/text_parse.go | 8 +- .../prometheus/common/model/alert.go | 4 +- .../prometheus/common/model/labels.go | 22 +- .../prometheus/common/model/metadata.go | 28 + .../prometheus/common/model/metric.go | 368 ++++- .../prometheus/common/model/signature.go | 6 +- .../prometheus/common/model/silence.go | 2 +- .../prometheus/common/model/value.go | 16 +- .../prometheus/common/model/value_float.go | 14 +- .../github.com/regclient/regclient/.gitignore | 3 +- .../regclient/regclient/.version-bump.lock | 134 +- .../regclient/regclient/.version-bump.yml | 186 ++- .../regclient/regclient/CODE_OF_CONDUCT.md | 4 +- .../regclient/regclient/CONTRIBUTING.md | 19 +- .../github.com/regclient/regclient/Makefile | 174 ++- .../github.com/regclient/regclient/README.md | 13 +- .../regclient/regclient/SECURITY.md | 4 +- vendor/github.com/regclient/regclient/blob.go | 190 ++- .../regclient/regclient/config/credhelper.go | 11 +- .../regclient/regclient/config/docker.go | 17 +- .../regclient/regclient/config/host.go | 130 +- .../github.com/regclient/regclient/image.go | 1034 +++++++++----- .../regclient/regclient/internal/auth/auth.go | 8 +- .../regclient/internal/auth/error.go | 34 +- 
.../regclient/internal/cache/cache.go | 182 +++ .../regclient/internal/conffile/conffile.go | 9 +- .../regclient/internal/httplink/httplink.go | 20 +- .../regclient/internal/limitread/limitread.go | 29 + .../regclient/internal/muset/muset.go | 49 + .../regclient/internal/reghttp/http.go | 200 +-- .../regclient/regclient/internal/rwfs/mem.go | 73 + .../regclient/regclient/internal/rwfs/os.go | 13 + .../regclient/regclient/internal/rwfs/rwfs.go | 81 +- .../regclient/internal/strparse/strparse.go | 91 ++ .../regclient/internal/throttle/throttle.go | 192 +++ .../regclient/internal/units/size.go | 94 +- .../regclient/internal/version/version.go | 4 +- .../regclient/internal/wraperr/error.go | 34 - .../regclient/regclient/manifest.go | 40 +- vendor/github.com/regclient/regclient/ping.go | 18 + .../regclient/pkg/archive/compress.go | 7 +- .../regclient/regclient/pkg/archive/tar.go | 19 +- .../regclient/regclient/referrer.go | 8 +- .../regclient/regclient/regclient.go | 63 +- .../github.com/regclient/regclient/release.md | 154 +- vendor/github.com/regclient/regclient/repo.go | 15 +- .../github.com/regclient/regclient/scheme.go | 16 +- .../regclient/regclient/scheme/ocidir/blob.go | 105 +- .../regclient/scheme/ocidir/close.go | 20 +- .../regclient/scheme/ocidir/manifest.go | 96 +- .../regclient/scheme/ocidir/ocidir.go | 223 ++- .../regclient/regclient/scheme/ocidir/ping.go | 28 + .../regclient/scheme/ocidir/referrer.go | 45 +- .../regclient/regclient/scheme/ocidir/tag.go | 22 +- .../regclient/regclient/scheme/reg/blob.go | 121 +- .../regclient/scheme/reg/manifest.go | 100 +- .../regclient/regclient/scheme/reg/ping.go | 41 + .../regclient/scheme/reg/referrer.go | 123 +- .../regclient/regclient/scheme/reg/reg.go | 132 +- .../regclient/regclient/scheme/reg/repo.go | 6 +- .../regclient/regclient/scheme/reg/tag.go | 58 +- .../regclient/regclient/scheme/scheme.go | 191 +-- vendor/github.com/regclient/regclient/tag.go | 8 + .../regclient/regclient/types/blob/blob.go | 48 +- .../regclient/regclient/types/blob/common.go | 65 +- .../regclient/types/blob/ociconfig.go | 110 +- .../regclient/regclient/types/blob/reader.go | 157 ++- .../regclient/regclient/types/blob/tar.go | 101 +- .../regclient/regclient/types/callback.go | 29 + .../regclient/regclient/types/descriptor.go | 203 +-- .../regclient/types/descriptor/descriptor.go | 293 ++++ .../types/docker/schema1/manifest.go | 9 +- .../types/docker/schema2/manifest.go | 9 +- .../types/docker/schema2/manifestlist.go | 7 +- .../regclient/regclient/types/error.go | 154 +- .../regclient/regclient/types/errs/error.go | 87 ++ .../regclient/types/manifest/common.go | 9 +- .../regclient/types/manifest/docker1.go | 113 +- .../regclient/types/manifest/docker2.go | 133 +- .../regclient/types/manifest/manifest.go | 186 +-- .../regclient/types/manifest/oci1.go | 249 ++-- .../regclient/regclient/types/mediatype.go | 123 +- .../regclient/types/mediatype/mediatype.go | 51 + .../regclient/types/oci/v1/artifact.go | 12 +- .../regclient/types/oci/v1/config.go | 61 +- .../regclient/regclient/types/oci/v1/index.go | 10 +- .../regclient/types/oci/v1/manifest.go | 19 +- .../regclient/regclient/types/ping/ping.go | 13 + .../regclient/types/platform/compare.go | 213 +++ .../regclient/types/platform/cpuinfo.go | 101 +- .../regclient/types/platform/cpuinfo_armx.go | 83 ++ .../regclient/types/platform/cpuinfo_other.go | 8 + .../regclient/types/platform/cpuinfo_x86.go | 100 ++ .../regclient/types/platform/cpuinfo_x86.s | 26 + .../regclient/types/platform/os_darwin.go | 8 + 
.../regclient/types/platform/os_other.go | 8 + .../regclient/types/platform/platform.go | 137 +- .../types/platform/platform_other.go | 4 +- .../types/platform/platform_windows.go | 4 +- .../regclient/regclient/types/ref/ref.go | 168 ++- .../regclient/types/referrer/referrer.go | 39 +- .../regclient/types/repo/repolist.go | 8 +- .../regclient/regclient/types/tag/taglist.go | 73 +- .../regclient/types/warning/warning.go | 2 +- vendor/github.com/ulikunitz/xz/.gitignore | 28 + vendor/github.com/ulikunitz/xz/LICENSE | 26 + vendor/github.com/ulikunitz/xz/README.md | 77 + vendor/github.com/ulikunitz/xz/SECURITY.md | 10 + vendor/github.com/ulikunitz/xz/TODO.md | 372 +++++ vendor/github.com/ulikunitz/xz/bits.go | 79 ++ vendor/github.com/ulikunitz/xz/crc.go | 54 + vendor/github.com/ulikunitz/xz/format.go | 721 ++++++++++ .../github.com/ulikunitz/xz/fox-check-none.xz | Bin 0 -> 96 bytes vendor/github.com/ulikunitz/xz/fox.xz | Bin 0 -> 104 bytes .../ulikunitz/xz/internal/hash/cyclic_poly.go | 181 +++ .../ulikunitz/xz/internal/hash/doc.go | 14 + .../ulikunitz/xz/internal/hash/rabin_karp.go | 66 + .../ulikunitz/xz/internal/hash/roller.go | 29 + .../ulikunitz/xz/internal/xlog/xlog.go | 456 ++++++ .../github.com/ulikunitz/xz/lzma/bintree.go | 522 +++++++ vendor/github.com/ulikunitz/xz/lzma/bitops.go | 47 + .../github.com/ulikunitz/xz/lzma/breader.go | 39 + vendor/github.com/ulikunitz/xz/lzma/buffer.go | 171 +++ .../ulikunitz/xz/lzma/bytewriter.go | 37 + .../github.com/ulikunitz/xz/lzma/decoder.go | 277 ++++ .../ulikunitz/xz/lzma/decoderdict.go | 128 ++ .../ulikunitz/xz/lzma/directcodec.go | 38 + .../github.com/ulikunitz/xz/lzma/distcodec.go | 140 ++ .../github.com/ulikunitz/xz/lzma/encoder.go | 268 ++++ .../ulikunitz/xz/lzma/encoderdict.go | 149 ++ vendor/github.com/ulikunitz/xz/lzma/fox.lzma | Bin 0 -> 67 bytes .../github.com/ulikunitz/xz/lzma/hashtable.go | 309 ++++ vendor/github.com/ulikunitz/xz/lzma/header.go | 167 +++ .../github.com/ulikunitz/xz/lzma/header2.go | 398 ++++++ .../ulikunitz/xz/lzma/lengthcodec.go | 115 ++ .../ulikunitz/xz/lzma/literalcodec.go | 125 ++ .../ulikunitz/xz/lzma/matchalgorithm.go | 52 + .../github.com/ulikunitz/xz/lzma/operation.go | 55 + vendor/github.com/ulikunitz/xz/lzma/prob.go | 53 + .../ulikunitz/xz/lzma/properties.go | 69 + .../ulikunitz/xz/lzma/rangecodec.go | 222 +++ vendor/github.com/ulikunitz/xz/lzma/reader.go | 100 ++ .../github.com/ulikunitz/xz/lzma/reader2.go | 231 +++ vendor/github.com/ulikunitz/xz/lzma/state.go | 145 ++ .../ulikunitz/xz/lzma/treecodecs.go | 133 ++ vendor/github.com/ulikunitz/xz/lzma/writer.go | 209 +++ .../github.com/ulikunitz/xz/lzma/writer2.go | 305 ++++ vendor/github.com/ulikunitz/xz/lzmafilter.go | 117 ++ vendor/github.com/ulikunitz/xz/make-docs | 5 + vendor/github.com/ulikunitz/xz/none-check.go | 23 + vendor/github.com/ulikunitz/xz/reader.go | 359 +++++ vendor/github.com/ulikunitz/xz/writer.go | 399 ++++++ vendor/go.uber.org/zap/.golangci.yml | 2 +- vendor/go.uber.org/zap/.readme.tmpl | 10 +- vendor/go.uber.org/zap/CHANGELOG.md | 54 +- .../go.uber.org/zap/{LICENSE.txt => LICENSE} | 0 vendor/go.uber.org/zap/README.md | 66 +- vendor/go.uber.org/zap/buffer/buffer.go | 2 +- vendor/go.uber.org/zap/field.go | 2 + vendor/go.uber.org/zap/logger.go | 39 +- vendor/go.uber.org/zap/options.go | 15 + vendor/go.uber.org/zap/sugar.go | 39 + .../zap/zapcore/console_encoder.go | 2 +- vendor/go.uber.org/zap/zapcore/encoder.go | 15 + vendor/go.uber.org/zap/zapcore/field.go | 2 +- .../go.uber.org/zap/zapcore/json_encoder.go | 2 +- 
vendor/golang.org/x/sync/errgroup/errgroup.go | 3 + vendor/golang.org/x/sys/unix/aliases.go | 2 +- .../x/sys/unix/syscall_darwin_libSystem.go | 2 +- .../golang.org/x/sys/unix/syscall_freebsd.go | 12 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 99 ++ .../golang.org/x/sys/unix/zsyscall_linux.go | 10 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 60 + vendor/golang.org/x/tools/cover/profile.go | 266 ++++ .../encoding/protodelim/protodelim.go | 160 +++ .../internal/editiondefaults/defaults.go | 12 + .../editiondefaults}/editions_defaults.binpb | 2 +- .../protobuf/internal/filedesc/desc.go | 67 +- .../protobuf/internal/filedesc/desc_init.go | 52 + .../protobuf/internal/filedesc/desc_lazy.go | 28 + .../protobuf/internal/filedesc/editions.go | 142 ++ .../protobuf/internal/genid/descriptor_gen.go | 152 +- .../internal/genid/go_features_gen.go | 31 + .../protobuf/internal/genid/struct_gen.go | 5 + .../protobuf/internal/genid/type_gen.go | 38 + .../protobuf/internal/impl/codec_extension.go | 22 +- .../protobuf/internal/impl/codec_tables.go | 2 +- .../internal/impl/message_reflect_field.go | 2 +- .../protobuf/internal/strs/strings.go | 2 +- .../protobuf/internal/version/version.go | 2 +- .../protobuf/reflect/protodesc/desc_init.go | 42 +- .../reflect/protodesc/desc_resolve.go | 4 +- .../reflect/protodesc/desc_validate.go | 6 +- .../protobuf/reflect/protodesc/editions.go | 131 +- .../protobuf/reflect/protoreflect/proto.go | 2 + .../reflect/protoreflect/source_gen.go | 2 - .../types/descriptorpb/descriptor.pb.go | 1254 ++++++++--------- .../types/gofeaturespb/go_features.pb.go | 177 +++ .../types/gofeaturespb/go_features.proto | 28 + .../helm/v3/internal/version/version.go | 2 +- vendor/helm.sh/helm/v3/pkg/action/hooks.go | 16 +- vendor/helm.sh/helm/v3/pkg/action/lint.go | 7 +- vendor/helm.sh/helm/v3/pkg/action/package.go | 2 +- .../helm/v3/pkg/action/registry_login.go | 2 +- .../helm/v3/pkg/action/registry_logout.go | 2 +- vendor/helm.sh/helm/v3/pkg/action/upgrade.go | 11 + .../helm/v3/pkg/chart/loader/directory.go | 2 +- vendor/helm.sh/helm/v3/pkg/chart/metadata.go | 15 + .../helm.sh/helm/v3/pkg/chartutil/coalesce.go | 2 +- .../helm.sh/helm/v3/pkg/chartutil/create.go | 17 +- .../helm/v3/pkg/chartutil/dependencies.go | 3 +- .../helm.sh/helm/v3/pkg/chartutil/errors.go | 8 + vendor/helm.sh/helm/v3/pkg/chartutil/save.go | 20 + vendor/helm.sh/helm/v3/pkg/cli/environment.go | 21 + .../helm.sh/helm/v3/pkg/downloader/manager.go | 7 + vendor/helm.sh/helm/v3/pkg/engine/engine.go | 120 +- .../helm.sh/helm/v3/pkg/engine/lookup_func.go | 23 +- .../helm/v3/pkg/getter/plugingetter.go | 10 +- .../helm/v3/{internal => pkg}/ignore/doc.go | 2 +- .../helm/v3/{internal => pkg}/ignore/rules.go | 0 vendor/helm.sh/helm/v3/pkg/kube/client.go | 8 +- .../helm.sh/helm/v3/pkg/kube/fake/printer.go | 4 +- vendor/helm.sh/helm/v3/pkg/kube/ready.go | 84 +- vendor/helm.sh/helm/v3/pkg/lint/lint.go | 10 +- .../helm/v3/pkg/lint/rules/chartfile.go | 4 + .../helm/v3/pkg/lint/rules/dependencies.go | 21 + .../helm/v3/pkg/lint/rules/deprecations.go | 17 +- .../helm/v3/pkg/lint/rules/template.go | 17 +- vendor/helm.sh/helm/v3/pkg/plugin/plugin.go | 4 + vendor/helm.sh/helm/v3/pkg/pusher/pusher.go | 2 +- .../helm/v3/pkg/releaseutil/kind_sorter.go | 2 +- vendor/helm.sh/helm/v3/pkg/repo/index.go | 4 + .../helm.sh/helm/v3/pkg/storage/driver/sql.go | 2 +- vendor/k8s.io/api/core/v1/generated.proto | 2 +- vendor/k8s.io/api/core/v1/types.go | 2 +- .../core/v1/types_swagger_doc_generated.go | 2 +- vendor/modules.txt | 87 +- 
.../controller-runtime/pkg/cache/cache.go | 42 +- .../pkg/client/apiutil/restmapper.go | 28 +- .../pkg/client/fake/client.go | 6 +- .../pkg/manager/internal.go | 2 + .../pkg/manager/runnable_group.go | 15 +- 281 files changed, 17077 insertions(+), 4056 deletions(-) delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go create mode 100644 vendor/github.com/prometheus/common/model/metadata.go create mode 100644 vendor/github.com/regclient/regclient/internal/cache/cache.go create mode 100644 vendor/github.com/regclient/regclient/internal/limitread/limitread.go create mode 100644 vendor/github.com/regclient/regclient/internal/muset/muset.go create mode 100644 vendor/github.com/regclient/regclient/internal/strparse/strparse.go create mode 100644 vendor/github.com/regclient/regclient/internal/throttle/throttle.go delete mode 100644 vendor/github.com/regclient/regclient/internal/wraperr/error.go create mode 100644 vendor/github.com/regclient/regclient/ping.go create mode 100644 vendor/github.com/regclient/regclient/scheme/ocidir/ping.go create mode 100644 vendor/github.com/regclient/regclient/scheme/reg/ping.go create mode 100644 vendor/github.com/regclient/regclient/types/callback.go create mode 100644 vendor/github.com/regclient/regclient/types/descriptor/descriptor.go create mode 100644 vendor/github.com/regclient/regclient/types/errs/error.go create mode 100644 vendor/github.com/regclient/regclient/types/mediatype/mediatype.go create mode 100644 vendor/github.com/regclient/regclient/types/ping/ping.go create mode 100644 vendor/github.com/regclient/regclient/types/platform/compare.go create mode 100644 vendor/github.com/regclient/regclient/types/platform/cpuinfo_armx.go create mode 100644 vendor/github.com/regclient/regclient/types/platform/cpuinfo_other.go create mode 100644 vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.go create mode 100644 vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.s create mode 100644 vendor/github.com/regclient/regclient/types/platform/os_darwin.go create mode 100644 vendor/github.com/regclient/regclient/types/platform/os_other.go create mode 100644 vendor/github.com/ulikunitz/xz/.gitignore create mode 100644 vendor/github.com/ulikunitz/xz/LICENSE create mode 100644 vendor/github.com/ulikunitz/xz/README.md create mode 100644 vendor/github.com/ulikunitz/xz/SECURITY.md create mode 100644 vendor/github.com/ulikunitz/xz/TODO.md create mode 100644 vendor/github.com/ulikunitz/xz/bits.go create mode 100644 vendor/github.com/ulikunitz/xz/crc.go create mode 100644 vendor/github.com/ulikunitz/xz/format.go create mode 100644 vendor/github.com/ulikunitz/xz/fox-check-none.xz create mode 100644 vendor/github.com/ulikunitz/xz/fox.xz create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/doc.go create 
mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/roller.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bintree.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bitops.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/breader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/buffer.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bytewriter.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/directcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/distcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/fox.lzma create mode 100644 vendor/github.com/ulikunitz/xz/lzma/hashtable.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/literalcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/operation.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/prob.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/properties.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/rangecodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/state.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/treecodecs.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzmafilter.go create mode 100644 vendor/github.com/ulikunitz/xz/make-docs create mode 100644 vendor/github.com/ulikunitz/xz/none-check.go create mode 100644 vendor/github.com/ulikunitz/xz/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/writer.go rename vendor/go.uber.org/zap/{LICENSE.txt => LICENSE} (100%) create mode 100644 vendor/golang.org/x/tools/cover/profile.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go create mode 100644 vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go rename vendor/google.golang.org/protobuf/{reflect/protodesc => internal/editiondefaults}/editions_defaults.binpb (69%) create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/editions.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go create mode 100644 vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto rename vendor/helm.sh/helm/v3/{internal => pkg}/ignore/doc.go (97%) rename vendor/helm.sh/helm/v3/{internal => pkg}/ignore/rules.go (100%) diff --git a/.common-ci.yml b/.common-ci.yml index 35351153f..4ff04c33f 100644 --- a/.common-ci.yml +++ b/.common-ci.yml @@ -149,7 +149,7 @@ trigger-pipeline: # Download the regctl binary for use in the release steps .regctl-setup: 
before_script: - - export REGCTL_VERSION=v0.3.10 + - export REGCTL_VERSION=v0.6.0 - apk add --no-cache curl - mkdir -p bin - curl -sSLo bin/regctl https://github.com/regclient/regclient/releases/download/${REGCTL_VERSION}/regctl-linux-amd64 diff --git a/go.mod b/go.mod index 75a9fbfe2..c9ff8b672 100644 --- a/go.mod +++ b/go.mod @@ -4,33 +4,33 @@ go 1.21 require ( github.com/Masterminds/sprig/v3 v3.2.3 - github.com/NVIDIA/go-nvlib v0.1.0 + github.com/NVIDIA/go-nvlib v0.2.0 github.com/NVIDIA/k8s-kata-manager v0.0.0-20230620232711-08b57feb9b5a github.com/NVIDIA/k8s-operator-libs v0.0.0-20240214071211-ea58a3ada15c github.com/NVIDIA/nvidia-container-toolkit v1.14.6 github.com/davecgh/go-spew v1.1.1 github.com/go-logr/logr v1.4.1 github.com/mitchellh/hashstructure v1.1.0 - github.com/mittwald/go-helm-client v0.12.7 - github.com/onsi/ginkgo/v2 v2.14.0 - github.com/onsi/gomega v1.30.0 + github.com/mittwald/go-helm-client v0.12.9 + github.com/onsi/ginkgo/v2 v2.17.1 + github.com/onsi/gomega v1.32.0 github.com/openshift/api v0.0.0-20240306072808-610cbc77dbab github.com/openshift/client-go v0.0.0-20240215090359-b71f6f2731f5 github.com/operator-framework/api v0.17.6 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.65.2 - github.com/prometheus/client_golang v1.18.0 - github.com/regclient/regclient v0.4.8 + github.com/prometheus/client_golang v1.19.0 + github.com/regclient/regclient v0.6.0 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 github.com/urfave/cli/v2 v2.27.1 - go.uber.org/zap v1.26.0 - golang.org/x/mod v0.15.0 - k8s.io/api v0.29.1 - k8s.io/apiextensions-apiserver v0.29.1 - k8s.io/apimachinery v0.29.1 - k8s.io/client-go v0.29.1 + go.uber.org/zap v1.27.0 + golang.org/x/mod v0.17.0 + k8s.io/api v0.29.3 + k8s.io/apiextensions-apiserver v0.29.3 + k8s.io/apimachinery v0.29.3 + k8s.io/client-go v0.29.3 k8s.io/klog/v2 v2.110.1 - sigs.k8s.io/controller-runtime v0.17.1 + sigs.k8s.io/controller-runtime v0.17.3 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd sigs.k8s.io/yaml v1.4.0 ) @@ -80,7 +80,7 @@ require ( github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.2 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect @@ -109,7 +109,6 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -128,7 +127,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/rubenv/sql-migrate v1.6.0 // indirect @@ -137,6 +136,7 @@ require ( github.com/spf13/cast v1.6.0 // indirect github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/ulikunitz/xz v0.5.11 // indirect github.com/xeipuuv/gojsonpointer 
v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect @@ -149,29 +149,29 @@ require ( go.opentelemetry.io/otel/trace v1.21.0 // indirect go.starlark.net v0.0.0-20231121155337-90ade8b19d09 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.17.0 // indirect + golang.org/x/crypto v0.18.0 // indirect golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc // indirect - golang.org/x/net v0.19.0 // indirect - golang.org/x/oauth2 v0.15.0 // indirect - golang.org/x/sync v0.5.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/term v0.15.0 // indirect + golang.org/x/net v0.20.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.16.1 // indirect + golang.org/x/tools v0.17.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect google.golang.org/grpc v1.60.1 // indirect - google.golang.org/protobuf v1.32.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/evanphx/json-patch.v5 v5.7.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - helm.sh/helm/v3 v3.13.3 // indirect - k8s.io/apiserver v0.29.1 // indirect + helm.sh/helm/v3 v3.14.2 // indirect + k8s.io/apiserver v0.29.3 // indirect k8s.io/cli-runtime v0.29.1 // indirect - k8s.io/component-base v0.29.1 // indirect + k8s.io/component-base v0.29.3 // indirect k8s.io/kube-openapi v0.0.0-20240103195357-a9f8850cb432 // indirect k8s.io/kubectl v0.29.1 // indirect k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect diff --git a/go.sum b/go.sum index aa60d3945..471db6a74 100644 --- a/go.sum +++ b/go.sum @@ -21,8 +21,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= -github.com/NVIDIA/go-nvlib v0.1.0 h1:VYNqzGRaE5zrku1ysS9J+hSkuuwTpYuSLqDF1BaCNUs= -github.com/NVIDIA/go-nvlib v0.1.0/go.mod h1:lDrLM77CNdwfCN5ySYpuyzBQLQR6pGC+rHri1T4l+l4= +github.com/NVIDIA/go-nvlib v0.2.0 h1:roq+SDstbP1fcy2XVH7wB2Gz2/Ud7Q+NGQYOcVITVrA= +github.com/NVIDIA/go-nvlib v0.2.0/go.mod h1:kFuLNTyD1tF6FbRFlk+/EdUW5BrkE+v1Y3A3/9zKSjA= github.com/NVIDIA/k8s-kata-manager v0.0.0-20230620232711-08b57feb9b5a h1:3nyTp1cXzZMHoUuhMwHdz9QDzl100ECvIDYFxdjWk6o= github.com/NVIDIA/k8s-kata-manager v0.0.0-20230620232711-08b57feb9b5a/go.mod h1:K7HCLTndSwBEZwBu6sU7daVeryV1Qt/DtKH8nONJj4o= github.com/NVIDIA/k8s-operator-libs v0.0.0-20240214071211-ea58a3ada15c h1:nt9jPM6K7DCYydMKhlfMrZ9aFasdNU4WKUZvO4cN2us= @@ -153,8 +153,8 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf 
v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= @@ -244,8 +244,6 @@ github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg= github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -258,8 +256,8 @@ github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/ github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mittwald/go-helm-client v0.12.7 h1:+rhDcLP96d+IavqTORDsY8tc6BNzukwzka85HX5hBzw= -github.com/mittwald/go-helm-client v0.12.7/go.mod h1:ipUXxirGzXl7hfMV4lY3SYXhwudYYb6T/dnNfKHDQiM= +github.com/mittwald/go-helm-client v0.12.9 h1:tfI5ECgrbfAolA9TnlCeA5F2TEIvdsOxVmoSyW80lCI= +github.com/mittwald/go-helm-client v0.12.9/go.mod h1:ukR3Et5zbfBij7bFL1ZnLvPytsbBXCrI2qQYr2yVi9I= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= @@ -284,10 +282,12 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= -github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/olareg/olareg v0.0.0-20240323210534-20ec9e4f6dd4 h1:1I7mTStFqh+DqPG9rRjEhEallPi2MQg2uACGImFGS1Q= +github.com/olareg/olareg v0.0.0-20240323210534-20ec9e4f6dd4/go.mod h1:RBuU7JW7SoIIxZKzLRhq8sVtQeAHzCAtRrXEBx2KlM4= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= 
+github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= @@ -314,23 +314,23 @@ github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.65.2/g github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/regclient/regclient v0.4.8 h1:h4uZRR4OT4oO+50qWu4bj+rzqRs/JwD3erb6lHIkYK4= -github.com/regclient/regclient v0.4.8/go.mod h1:UC6i29I09h9KHyABGLGvsvGi7KYRY8ZKLyt7fzvW4oE= +github.com/regclient/regclient v0.6.0 h1:P+8L9fdOTCo6S6O0/vE/C47csVY5UW5CMEzVwENVbWA= +github.com/regclient/regclient v0.6.0/go.mod h1:ofqAoAhp8T8us4pXT2acvmpUnqjfEdXt3y3b9YunqGo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -366,6 +366,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho= github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -406,23 +408,23 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM= golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -432,18 +434,18 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -460,13 +462,13 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -481,8 +483,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -497,8 +499,8 @@ google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -518,22 +520,22 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -helm.sh/helm/v3 v3.13.3 h1:0zPEdGqHcubehJHP9emCtzRmu8oYsJFRrlVF3TFj8xY= -helm.sh/helm/v3 v3.13.3/go.mod h1:3OKO33yI3p4YEXtTITN2+4oScsHeQe71KuzhlZ+aPfg= -k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= -k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= -k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= -k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= -k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= -k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/apiserver v0.29.1 h1:e2wwHUfEmMsa8+cuft8MT56+16EONIEK8A/gpBSco+g= -k8s.io/apiserver v0.29.1/go.mod h1:V0EpkTRrJymyVT3M49we8uh2RvXf7fWC5XLB0P3SwRw= +helm.sh/helm/v3 v3.14.2 h1:V71fv+NGZv0icBlr+in1MJXuUIHCiPG1hW9gEBISTIA= +helm.sh/helm/v3 v3.14.2/go.mod h1:2itvvDv2WSZXTllknfQo6j7u3VVgMAvm8POCDgYH424= +k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= +k8s.io/api 
v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO91YeI= +k8s.io/apiextensions-apiserver v0.29.3/go.mod h1:po0XiY5scnpJfFizNGo6puNU6Fq6D70UJY2Cb2KwAVc= +k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= +k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE= +k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs= k8s.io/cli-runtime v0.29.1 h1:By3WVOlEWYfyxhGko0f/IuAOLQcbBSMzwSaDren2JUs= k8s.io/cli-runtime v0.29.1/go.mod h1:vjEY9slFp8j8UoMhV5AlO8uulX9xk6ogfIesHobyBDU= -k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= -k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= -k8s.io/component-base v0.29.1 h1:MUimqJPCRnnHsskTTjKD+IC1EHBbRCVyi37IoFBrkYw= -k8s.io/component-base v0.29.1/go.mod h1:fP9GFjxYrLERq1GcWWZAE3bqbNcDKDytn2srWuHTtKc= +k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= +k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo= +k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= k8s.io/kube-openapi v0.0.0-20240103195357-a9f8850cb432 h1:+XYBQU3ZKUu60H6fEnkitTTabGoKfIG8zczhZBENu9o= @@ -544,8 +546,8 @@ k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCf k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go v1.2.4 h1:djpBY2/2Cs1PV87GSJlxv4voajVOMZxqqtq9AB8YNvY= oras.land/oras-go v1.2.4/go.mod h1:DYcGfb3YF1nKjcezfX2SNlDAeQFKSXmf+qrFmrh4324= -sigs.k8s.io/controller-runtime v0.17.1 h1:V1dQELMGVk46YVXXQUbTFujU7u4DQj6YUj9Rb6cuzz8= -sigs.k8s.io/controller-runtime v0.17.1/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= +sigs.k8s.io/controller-runtime v0.17.3 h1:65QmN7r3FWgTxDMz9fvGnO1kbf2nu+acg9p2R9oYYYk= +sigs.k8s.io/controller-runtime v0.17.3/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.16.0 h1:/zAR4FOQDCkgSDmVzV2uiFbuy9bhu3jEzthrHCuvm1g= diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index 85f9f5736..fdff3fdb4 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -127,9 +127,10 @@ func Is(any *anypb.Any, m proto.Message) bool { // The allocated message is stored in the embedded proto.Message. // // Example: -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) // // Deprecated: Use the any.UnmarshalNew method instead to unmarshal // the any message contents into a new instance of the underlying message. 
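The reflowed DynamicAny example above documents a deprecated API; its note points at any.UnmarshalNew. As an illustrative sketch only (editor's example, not part of this patch; wrapperspb is used as a stand-in message type), the replacement path with the google.golang.org/protobuf module looks roughly like this:

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/types/known/anypb"
		"google.golang.org/protobuf/types/known/wrapperspb"
	)

	func main() {
		// Pack a concrete message into an Any.
		a, err := anypb.New(wrapperspb.String("hello"))
		if err != nil {
			panic(err)
		}
		// UnmarshalNew allocates the concrete type recorded in the Any's
		// type URL, replacing the ptypes.DynamicAny pattern shown above.
		m, err := a.UnmarshalNew()
		if err != nil {
			panic(err)
		}
		fmt.Printf("unmarshaled message: %v\n", m)
	}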
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. 
- - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE deleted file mode 100644 index 5d8cb5b72..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE +++ /dev/null @@ -1 +0,0 @@ -Copyright 2012 Matt T. Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore deleted file mode 100644 index e16fb946b..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cover.dat diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile deleted file mode 100644 index 81be21437..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: - -cover: - go test -cover -v -coverprofile=cover.dat ./... - go tool cover -func cover.dat - -.PHONY: cover diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go deleted file mode 100644 index 7c08e564f..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "errors" - "io" - - "google.golang.org/protobuf/proto" -) - -// TODO: Give error package name prefix in next minor release. -var errInvalidVarint = errors.New("invalid varint32 encountered") - -// ReadDelimited decodes a message from the provided length-delimited stream, -// where the length is encoded as 32-bit varint prefix to the message body. -// It returns the total number of bytes read and any applicable error. This is -// roughly equivalent to the companion Java API's -// MessageLite#parseDelimitedFrom. As per the reader contract, this function -// calls r.Read repeatedly as required until exactly one message including its -// prefix is read and decoded (or an error has occurred). The function never -// reads more bytes from the stream than required. The function never returns -// an error if a message has been read and decoded correctly, even if the end -// of the stream has been reached in doing so. In that case, any subsequent -// calls return (0, io.EOF). 
-func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { - // TODO: Consider allowing the caller to specify a decode buffer in the - // next major version. - - // TODO: Consider using error wrapping to annotate error state in pass- - // through cases in the next minor version. - - // Per AbstractParser#parsePartialDelimitedFrom with - // CodedInputStream#readRawVarint32. - var headerBuf [binary.MaxVarintLen32]byte - var bytesRead, varIntBytes int - var messageLength uint64 - for varIntBytes == 0 { // i.e. no varint has been decoded yet. - if bytesRead >= len(headerBuf) { - return bytesRead, errInvalidVarint - } - // We have to read byte by byte here to avoid reading more bytes - // than required. Each read byte is appended to what we have - // read before. - newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) - if newBytesRead == 0 { - if err != nil { - return bytesRead, err - } - // A Reader should not return (0, nil); but if it does, it should - // be treated as no-op according to the Reader contract. - continue - } - bytesRead += newBytesRead - // Now present everything read so far to the varint decoder and - // see if a varint can be decoded already. - messageLength, varIntBytes = binary.Uvarint(headerBuf[:bytesRead]) - } - - messageBuf := make([]byte, messageLength) - newBytesRead, err := io.ReadFull(r, messageBuf) - bytesRead += newBytesRead - if err != nil { - return bytesRead, err - } - - return bytesRead, proto.Unmarshal(messageBuf, m) -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go deleted file mode 100644 index c318385cb..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go deleted file mode 100644 index e58dd9d29..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package pbutil - -import ( - "encoding/binary" - "io" - - "google.golang.org/protobuf/proto" -) - -// WriteDelimited encodes and dumps a message to the provided writer prefixed -// with a 32-bit varint indicating the length of the encoded message, producing -// a length-delimited record stream, which can be used to chain together -// encoded messages of the same type together in a file. It returns the total -// number of bytes written and any applicable error. This is roughly -// equivalent to the companion Java API's MessageLite#writeDelimitedTo. -func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { - // TODO: Consider allowing the caller to specify an encode buffer in the - // next major version. - - buffer, err := proto.Marshal(m) - if err != nil { - return 0, err - } - - var buf [binary.MaxVarintLen32]byte - encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) - - sync, err := w.Write(buf[:encodedLength]) - if err != nil { - return sync, err - } - - n, err = w.Write(buffer) - return n + sync, err -} diff --git a/vendor/github.com/mittwald/go-helm-client/client.go b/vendor/github.com/mittwald/go-helm-client/client.go index 3a23113de..8d7f3d023 100644 --- a/vendor/github.com/mittwald/go-helm-client/client.go +++ b/vendor/github.com/mittwald/go-helm-client/client.go @@ -926,6 +926,7 @@ func mergeInstallOptions(chartSpec *ChartSpec, installOptions *action.Install) { installOptions.Atomic = chartSpec.Atomic installOptions.SkipCRDs = chartSpec.SkipCRDs installOptions.DryRun = chartSpec.DryRun + installOptions.DryRunOption = chartSpec.DryRunOption installOptions.SubNotes = chartSpec.SubNotes installOptions.WaitForJobs = chartSpec.WaitForJobs } @@ -946,6 +947,7 @@ func mergeUpgradeOptions(chartSpec *ChartSpec, upgradeOptions *action.Upgrade) { upgradeOptions.Atomic = chartSpec.Atomic upgradeOptions.CleanupOnFail = chartSpec.CleanupOnFail upgradeOptions.DryRun = chartSpec.DryRun + upgradeOptions.DryRunOption = chartSpec.DryRunOption upgradeOptions.SubNotes = chartSpec.SubNotes upgradeOptions.WaitForJobs = chartSpec.WaitForJobs } diff --git a/vendor/github.com/mittwald/go-helm-client/types.go b/vendor/github.com/mittwald/go-helm-client/types.go index e78f75615..430254de1 100644 --- a/vendor/github.com/mittwald/go-helm-client/types.go +++ b/vendor/github.com/mittwald/go-helm-client/types.go @@ -188,6 +188,8 @@ type ChartSpec struct { // DryRun indicates whether to perform a dry run. // +optional DryRun bool `json:"dryRun,omitempty"` + // DryRunOption controls whether the operation is prepared, but not executed with options on whether or not to interact with the remote cluster. 
+ DryRunOption string `json:"dryRunOption,omitempty"` // Description specifies a custom description for the uninstalled release // +optional Description string `json:"description,omitempty"` diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index fbe515639..44222220a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,56 @@ +## 2.17.1 + +### Fixes +- If the user sets --seed=0, make sure all parallel nodes get the same seed [af0330d] + +## 2.17.0 + +### Features + +- add `--github-output` for nicer output in github actions [e8a2056] + +### Maintenance + +- fix typo in core_dsl.go [977bc6f] +- Fix typo in docs [e297e7b] + +## 2.16.0 + +### Features +- add SpecContext to reporting nodes + +### Fixes +- merge coverages instead of combining them (#1329) (#1340) [23f0cc5] +- core_dsl: disable Getwd() with environment variable (#1357) [cd418b7] + +### Maintenance +- docs/index.md: Typo [2cebe8d] +- fix docs [06de431] +- chore: test with Go 1.22 (#1352) [898cba9] +- Bump golang.org/x/tools from 0.16.1 to 0.17.0 (#1336) [17ae120] +- Bump golang.org/x/sys from 0.15.0 to 0.16.0 (#1327) [5a179ed] +- Bump github.com/go-logr/logr from 1.3.0 to 1.4.1 (#1321) [a1e6b69] +- Bump github-pages and jekyll-feed in /docs (#1351) [d52951d] +- Fix docs for handling failures in goroutines (#1339) [4471b2e] + +## 2.15.0 + +### Features + +- JUnit reports now interpret Label(owner:X) and set owner to X. [8f3bd70] +- include cancellation reason when cancelling spec context [96e915c] + +### Fixes + +- emit output of failed go tool cover invocation so users can try to debug things for themselves [c245d09] +- fix outline when using nodot in ginkgo v2 [dca77c8] +- Document areas where GinkgoT() behaves differently from testing.T [dbaf18f] +- bugfix(docs): use Unsetenv instead of Clearenv (#1337) [6f67a14] + +### Maintenance + +- Bump to go 1.20 [4fcd0b3] + ## 2.14.0 ### Features diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index 2d7a70ecc..a3e8237e9 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -292,7 +292,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { err = global.Suite.BuildTree() exitIfErr(err) - suitePath, err := os.Getwd() + suitePath, err := getwd() exitIfErr(err) suitePath, err = filepath.Abs(suitePath) exitIfErr(err) @@ -345,6 +345,15 @@ func extractSuiteConfiguration(args []interface{}) Labels { return suiteLabels } +func getwd() (string, error) { + if !strings.EqualFold(os.Getenv("GINKGO_PRESERVE_CACHE"), "true") { + // Getwd calls os.Getenv("PWD"), which breaks test caching if the cache + // is shared between two different directories with the same test code. + return os.Getwd() + } + return "", nil +} + /* PreviewSpecs walks the testing tree and produces a report without actually invoking the specs. See http://onsi.github.io/ginkgo/#previewing-specs for more information. 
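The getwd wrapper introduced above gates the working-directory lookup on GINKGO_PRESERVE_CACHE: os.Getwd consults $PWD, so identical test code checked out at two paths records two different suite paths and defeats go test's result cache. A small runnable mirror of that gating logic (illustrative only, not part of this patch):

	package main

	import (
		"fmt"
		"os"
		"strings"
	)

	// Same check as in the hunk above: skip the working-directory lookup
	// entirely when the user opts in to cache preservation.
	func getwd() (string, error) {
		if !strings.EqualFold(os.Getenv("GINKGO_PRESERVE_CACHE"), "true") {
			return os.Getwd()
		}
		return "", nil
	}

	func main() {
		os.Setenv("GINKGO_PRESERVE_CACHE", "true")
		p, err := getwd()
		fmt.Printf("suite path: %q, err: %v\n", p, err) // suite path: "", err: <nil>
	}

In practice the switch is driven from the environment, e.g. GINKGO_PRESERVE_CACHE=true go test ./... .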
@@ -369,7 +378,7 @@ func PreviewSpecs(description string, args ...any) Report { err = global.Suite.BuildTree() exitIfErr(err) - suitePath, err := os.Getwd() + suitePath, err := getwd() exitIfErr(err) suitePath, err = filepath.Abs(suitePath) exitIfErr(err) @@ -783,8 +792,8 @@ DeferCleanup can be passed: For example: BeforeEach(func() { - DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO")) - os.SetEnv("FOO", "BAR") + DeferCleanup(os.Setenv, "FOO", os.GetEnv("FOO")) + os.Setenv("FOO", "BAR") }) will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go new file mode 100644 index 000000000..3c5079ff4 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go @@ -0,0 +1,129 @@ +// Copyright (c) 2015, Wade Simmons +// All rights reserved. + +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: + +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. + +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// Package gocovmerge takes the results from multiple `go test -coverprofile` +// runs and merges them into one profile + +// this file was originally taken from the gocovmerge project +// see also: https://go.shabbyrobe.org/gocovmerge +package internal + +import ( + "fmt" + "io" + "sort" + + "golang.org/x/tools/cover" +) + +func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile { + i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName }) + if i < len(profiles) && profiles[i].FileName == p.FileName { + MergeCoverProfiles(profiles[i], p) + } else { + profiles = append(profiles, nil) + copy(profiles[i+1:], profiles[i:]) + profiles[i] = p + } + return profiles +} + +func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error { + if len(profiles) == 0 { + return nil + } + if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil { + return err + } + for _, p := range profiles { + for _, b := range p.Blocks { + if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil { + return err + } + } + } + return nil +} + +func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error { + if into.Mode != merge.Mode { + return fmt.Errorf("cannot merge profiles with different modes") + } + // Since the blocks are sorted, we can keep track of where the last block + // was inserted and only look at the blocks after that as targets for merge + startIndex := 0 + for _, b := range merge.Blocks { + var err error + startIndex, err = mergeProfileBlock(into, b, startIndex) + if err != nil { + return err + } + } + return nil +} + +func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) { + sortFunc := func(i int) bool { + pi := p.Blocks[i+startIndex] + return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol) + } + + i := 0 + if sortFunc(i) != true { + i = sort.Search(len(p.Blocks)-startIndex, sortFunc) + } + + i += startIndex + if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol { + if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol { + return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb) + } + switch p.Mode { + case "set": + p.Blocks[i].Count |= pb.Count + case "count", "atomic": + p.Blocks[i].Count += pb.Count + default: + return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode) + } + + } else { + if i > 0 { + pa := p.Blocks[i-1] + if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) { + return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb) + } + } + if i < len(p.Blocks)-1 { + pa := p.Blocks[i+1] + if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) { + return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb) + } + } + p.Blocks = append(p.Blocks, cover.ProfileBlock{}) + copy(p.Blocks[i+1:], p.Blocks[i:]) + p.Blocks[i] = pb + } + + return i + 1, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go index bd3c6d028..5f35864dd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -1,7 
+1,6 @@ package internal import ( - "bytes" "fmt" "os" "os/exec" @@ -12,6 +11,7 @@ import ( "github.com/google/pprof/profile" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/cover" ) func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string { @@ -144,38 +144,26 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC return messages, nil } -//loads each profile, combines them, deletes them, stores them in destination +// loads each profile, merges them, deletes them, stores them in destination func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { - combined := &bytes.Buffer{} - modeRegex := regexp.MustCompile(`^mode: .*\n`) - for i, profile := range profiles { - contents, err := os.ReadFile(profile) + var merged []*cover.Profile + for _, file := range profiles { + parsedProfiles, err := cover.ParseProfiles(file) if err != nil { - return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) + return err } - os.Remove(profile) - - // remove the cover mode line from every file - // except the first one - if i > 0 { - contents = modeRegex.ReplaceAll(contents, []byte{}) - } - - _, err = combined.Write(contents) - - // Add a newline to the end of every file if missing. - if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { - _, err = combined.Write([]byte("\n")) - } - - if err != nil { - return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error()) + os.Remove(file) + for _, p := range parsedProfiles { + merged = AddCoverProfile(merged, p) } } - - err := os.WriteFile(destination, combined.Bytes(), 0666) + dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + err = DumpCoverProfiles(merged, dst) if err != nil { - return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) + return err } return nil } @@ -184,7 +172,7 @@ func GetCoverageFromCoverProfile(profile string) (float64, error) { cmd := exec.Command("go", "tool", "cover", "-func", profile) output, err := cmd.CombinedOutput() if err != nil { - return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error()) + return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output)) } re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) matches := re.FindStringSubmatch(string(output)) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go index 958daccbf..5d8d00bb1 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -1,10 +1,11 @@ package outline import ( - "github.com/onsi/ginkgo/v2/types" "go/ast" "go/token" "strconv" + + "github.com/onsi/ginkgo/v2/types" ) const ( diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go index 67ec5ab75..f0a6b5d26 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go @@ -28,14 +28,7 @@ func packageNameForImport(f *ast.File, path string) *string { } name := spec.Name.String() if name == "" { - // If the package name is not explicitly specified, - // make an educated guess. This is not guaranteed to be correct. 
- lastSlash := strings.LastIndex(path, "/") - if lastSlash == -1 { - name = path - } else { - name = path[lastSlash+1:] - } + name = "ginkgo" } if name == "." { name = "" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 639541a16..02c6739e5 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -15,6 +15,11 @@ GinkgoT() is analogous to *testing.T and implements the majority of *testing.T's GinkgoT() takes an optional offset argument that can be used to get the correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately +GinkgoT() attempts to mimic the behavior of `testing.T` with the exception of the following: + +- Error/Errorf: failures in Ginkgo always immediately stop execution and there is no mechanism to log a failure without aborting the test. As such Error/Errorf are equivalent to Fatal/Fatalf. +- Parallel() is a no-op as Ginkgo's multi-process parallelism model is substantially different from go test's in-process model. + You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries */ func GinkgoT(optionalOffset ...int) FullGinkgoTInterface { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 16f0dc227..6a15f19ae 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -5,9 +5,8 @@ import ( "fmt" "reflect" "sort" - "time" - "sync" + "time" "github.com/onsi/ginkgo/v2/types" ) @@ -16,8 +15,8 @@ var _global_node_id_counter = uint(0) var _global_id_mutex = &sync.Mutex{} func UniqueNodeID() uint { - //There's a reace in the internal integration tests if we don't make - //accessing _global_node_id_counter safe across goroutines. + // There's a reace in the internal integration tests if we don't make + // accessing _global_node_id_counter safe across goroutines. 
_global_id_mutex.Lock() defer _global_id_mutex.Unlock() _global_node_id_counter += 1 @@ -44,8 +43,8 @@ type Node struct { SynchronizedAfterSuiteProc1Body func(SpecContext) SynchronizedAfterSuiteProc1BodyHasContext bool - ReportEachBody func(types.SpecReport) - ReportSuiteBody func(types.Report) + ReportEachBody func(SpecContext, types.SpecReport) + ReportSuiteBody func(SpecContext, types.Report) MarkedFocus bool MarkedPending bool @@ -209,7 +208,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy args = unrollInterfaceSlice(args) remainingArgs := []interface{}{} - //First get the CodeLocation up-to-date + // First get the CodeLocation up-to-date for _, arg := range args { switch v := arg.(type) { case Offset: @@ -225,11 +224,11 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy trackedFunctionError := false args = remainingArgs remainingArgs = []interface{}{} - //now process the rest of the args + // now process the rest of the args for _, arg := range args { switch t := reflect.TypeOf(arg); { case t == reflect.TypeOf(float64(0)): - break //ignore deprecated timeouts + break // ignore deprecated timeouts case t == reflect.TypeOf(Focus): node.MarkedFocus = bool(arg.(focusType)) if !nodeType.Is(types.NodeTypesForContainerAndIt) { @@ -325,7 +324,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy node.Body = func(SpecContext) { body() } } else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) { if node.ReportEachBody == nil { - node.ReportEachBody = arg.(func(types.SpecReport)) + if fn, ok := arg.(func(types.SpecReport)); ok { + node.ReportEachBody = func(_ SpecContext, r types.SpecReport) { fn(r) } + } else { + node.ReportEachBody = arg.(func(SpecContext, types.SpecReport)) + node.HasContext = true + } } else { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true @@ -333,7 +337,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) { if node.ReportSuiteBody == nil { - node.ReportSuiteBody = arg.(func(types.Report)) + if fn, ok := arg.(func(types.Report)); ok { + node.ReportSuiteBody = func(_ SpecContext, r types.Report) { fn(r) } + } else { + node.ReportSuiteBody = arg.(func(SpecContext, types.Report)) + node.HasContext = true + } } else { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true @@ -395,7 +404,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } - //validations + // validations if node.MarkedPending && node.MarkedFocus { appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go index 2515b84a1..2d2ea2fc3 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go @@ -17,7 +17,7 @@ type specContext struct { context.Context *ProgressReporterManager - cancel context.CancelFunc + cancel context.CancelCauseFunc suite *Suite } @@ -30,7 +30,7 @@ Note that while SpecContext is used to enforce deadlines by Ginkgo it is not con This is because Ginkgo needs finer control over when the context is canceled. 
Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation. */ func NewSpecContext(suite *Suite) *specContext { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancelCause(context.Background()) sc := &specContext{ cancel: cancel, suite: suite, diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index 6746152ec..a994ee3d6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -594,8 +594,8 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { suite.writer.Truncate() suite.outputInterceptor.StartInterceptingOutput() report := suite.currentSpecReport - nodes[i].Body = func(SpecContext) { - nodes[i].ReportEachBody(report) + nodes[i].Body = func(ctx SpecContext) { + nodes[i].ReportEachBody(ctx, report) } state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i])) @@ -762,7 +762,7 @@ func (suite *Suite) runReportSuiteNode(node Node, report types.Report) { report = report.Add(aggregatedReport) } - node.Body = func(SpecContext) { node.ReportSuiteBody(report) } + node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) } suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") suite.currentSpecReport.EndTime = time.Now() @@ -840,7 +840,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ timeoutInPlay = "node" } if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() { - //we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't + // we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't if node.NodeTimeout > 0 { deadline = now.Add(node.NodeTimeout) timeoutInPlay = "node" @@ -858,7 +858,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ } sc := NewSpecContext(suite) - defer sc.cancel() + defer sc.cancel(fmt.Errorf("spec has finished")) suite.selectiveLock.Lock() suite.currentSpecContext = sc @@ -918,9 +918,9 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ if outcomeFromRun != types.SpecStatePassed { additionalFailure := types.AdditionalFailure{ State: outcomeFromRun, - Failure: failure, //we make a copy - this will include all the configuration set up above... + Failure: failure, // we make a copy - this will include all the configuration set up above... } - //...and then we update the failure with the details from failureFromRun + // ...and then we update the failure with the details from failureFromRun additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation additionalFailure.Failure.ProgressReport = types.ProgressReport{} if outcome == types.SpecStateTimedout { @@ -958,8 +958,8 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ // tell the spec to stop. 
it's important we generate the progress report first to make sure we capture where // the spec is actually stuck - sc.cancel() - //and now we wait for the grace period + sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay)) + // and now we wait for the grace period gracePeriodChannel = time.After(gracePeriod) case <-interruptStatus.Channel: interruptStatus = suite.interruptHandler.Status() @@ -985,7 +985,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ } progressReport = progressReport.WithoutOtherGoroutines() - sc.cancel() + sc.cancel(fmt.Errorf(interruptStatus.Message())) if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut { if interruptStatus.ShouldIncludeProgressReport() { diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 56b7be758..4026859ec 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -182,6 +182,22 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) { r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) } +func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) { + r.emitBlock("\n") + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::group::%s", sectionName)) + } else { + r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName)) + } + fn() + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::endgroup::")) + } else { + r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName)) + } + +} + func (r *DefaultReporter) DidRun(report types.SpecReport) { v := r.conf.Verbosity() inParallel := report.RunningInParallel @@ -283,26 +299,23 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { //Emit Stdout/Stderr Output if showSeparateStdSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) - r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) - r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}")) + r.wrapTextBlock("Captured StdOut/StdErr Output", func() { + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + }) } if showSeparateVisibilityAlwaysReportsSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) - for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { - r.emitReportEntry(1, entry) - } - r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}")) + r.wrapTextBlock("Report Entries", func() { + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + }) } if showTimeline { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) - r.emitTimeline(1, report, timeline) - r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}")) + r.wrapTextBlock("Timeline", func() { + r.emitTimeline(1, report, timeline) + }) } // Emit Failure Message @@ -405,7 +418,11 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { highlightColor := r.highlightColorForState(state) r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) - r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", 
failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + if r.conf.GithubOutput { + r.emitBlock(r.fi(indent, "::error file=%s,line=%d::%s %s", failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } else { + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } if failure.ForwardedPanic != "" { r.emitBlock("\n") r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 816042208..43244a9bd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -15,6 +15,7 @@ import ( "fmt" "os" "path" + "regexp" "strings" "github.com/onsi/ginkgo/v2/config" @@ -104,6 +105,8 @@ type JUnitProperty struct { Value string `xml:"value,attr"` } +var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`) + type JUnitTestCase struct { // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()" Name string `xml:"name,attr"` @@ -113,6 +116,8 @@ type JUnitTestCase struct { Status string `xml:"status,attr"` // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime Time float64 `xml:"time,attr"` + // Owner is the owner of the spec - is set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes. + Owner string `xml:"owner,attr,omitempty"` //Skipped is populated with a message if the test was skipped or pending Skipped *JUnitSkipped `xml:"skipped,omitempty"` //Error is populated if the test panicked or was interrupted @@ -195,6 +200,12 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit if len(labels) > 0 && !config.OmitSpecLabels { name = name + " [" + strings.Join(labels, ", ") + "]" } + owner := "" + for _, label := range labels { + if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 { + owner = matches[1] + } + } name = strings.TrimSpace(name) test := JUnitTestCase{ @@ -202,6 +213,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit Classname: report.SuiteDescription, Status: spec.State.String(), Time: spec.RunTime.Seconds(), + Owner: owner, } if !spec.State.Is(config.OmitTimelinesForSpecState) { test.SystemErr = systemErrForUnstructuredReporters(spec) diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go index f33786a2d..aa1a35176 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -74,12 +74,21 @@ func AddReportEntry(name string, args ...interface{}) { /* ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that -receives a SpecReport. They are called before the spec starts. +receives a SpecReport or both SpecContext and SpecReport for interruptible behavior. They are called before the spec starts.
+ +Example: + + ReportBeforeEach(func(report SpecReport) { // process report }) + ReportBeforeEach(func(ctx SpecContext, report SpecReport) { + // process report + }, NodeTimeout(1 * time.Minute)) You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure. You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool { +func ReportBeforeEach(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) @@ -87,13 +96,23 @@ } /* -ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. ReportAfterEach nodes take a function that -receives a SpecReport. They are called after the spec has completed and receive the final report for the spec. +ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. +ReportAfterEach nodes take a function that receives a SpecReport or both SpecContext and SpecReport for interruptible behavior. +They are called after the spec has completed and receive the final report for the spec. + +Example: + + ReportAfterEach(func(report SpecReport) { // process report }) + ReportAfterEach(func(ctx SpecContext, report SpecReport) { + // process report + }, NodeTimeout(1 * time.Minute)) You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure. You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportAfterEach(body func(SpecReport), args ...interface{}) bool { +func ReportAfterEach(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) @@ -101,7 +120,15 @@ } /* -ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report. +ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function +that can either receive Report or both SpecContext and Report for interruptible behavior. + +Example Usage: + + ReportBeforeSuite(func(r Report) { // process report }) + ReportBeforeSuite(func(ctx SpecContext, r Report) { + // process report + }, NodeTimeout(1 * time.Minute)) They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite. ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) You cannot nest any other Ginkgo nodes within a ReportBeforeSuite node's closure.
You can learn more about ReportBeforeSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportBeforeSuite(body func(Report), args ...interface{}) bool { +func ReportBeforeSuite(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...)) } /* -ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report. +ReportAfterSuite nodes are run at the end of the suite. They accept a function that can either +receive Report or both SpecContext and Report for interruptible behavior. + +Example Usage: + + ReportAfterSuite("Non-interruptible ReportAfterSuite", func(r Report) { // process report }) + ReportAfterSuite("Interruptible ReportAfterSuite", func(ctx SpecContext, r Report) { + // process report + }, NodeTimeout(1 * time.Minute)) They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite. -ReportAftersuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) +ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across all parallel nodes @@ -134,8 +171,10 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool { +func ReportAfterSuite(text string, body any, args ...interface{}) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)) diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index c88fc85a7..cef273ee1 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -89,6 +89,7 @@ type ReporterConfig struct { VeryVerbose bool FullTrace bool ShowNodeEvents bool + GithubOutput bool JSONReport string JUnitReport string @@ -264,7 +265,7 @@ var FlagSections = GinkgoFlagSections{ // SuiteConfigFlags provides flags for the Ginkgo test process, and CLI var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", - Usage: "The seed used to randomize the spec suite."}, + Usage: "The seed used to randomize the spec suite.", AlwaysExport: true}, {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."}, @@ -331,6 +332,8 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, + {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", + Usage: "If set, default reporter prints easier to manage output in Github Actions."}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go index 9186ae873..de69f3022 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/flags.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -24,7 +24,8 @@ type GinkgoFlag struct { DeprecatedDocLink string DeprecatedVersion string - ExportAs string + ExportAs string + AlwaysExport bool } type GinkgoFlags []GinkgoFlag @@ -431,7 +432,7 @@ func (ssv stringSliceVar) Set(s string) error { return nil } -//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. +// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. 
func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { result := []string{} for _, flag := range flags { @@ -451,19 +452,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) iface := value.Interface() switch value.Type() { case reflect.TypeOf(string("")): - if iface.(string) != "" { + if iface.(string) != "" || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } case reflect.TypeOf(int64(0)): - if iface.(int64) != 0 { + if iface.(int64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(float64(0)): - if iface.(float64) != 0 { + if iface.(float64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%f", name, iface)) } case reflect.TypeOf(int(0)): - if iface.(int) != 0 { + if iface.(int) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(bool(true)): @@ -471,7 +472,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) result = append(result, fmt.Sprintf("--%s", name)) } case reflect.TypeOf(time.Duration(0)): - if iface.(time.Duration) != time.Duration(0) { + if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 7015be128..851d42b45 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.14.0" +const VERSION = "2.17.1" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index fe72a7b18..01ec5245c 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,41 @@ +## 1.32.0 + +### Maintenance +- Migrate github.com/golang/protobuf to google.golang.org/protobuf [436a197] + + This release drops the deprecated github.com/golang/protobuf and adopts google.golang.org/protobuf. Care was taken to ensure the release is backwards compatible (thanks @jbduncan !). Please open an issue if you run into one. 
+ +- chore: test with Go 1.22 (#733) [32ef35e] +- Bump golang.org/x/net from 0.19.0 to 0.20.0 (#717) [a0d0387] +- Bump github-pages and jekyll-feed in /docs (#732) [b71e477] +- docs: fix typo and broken anchor link to gstruct [f460154] +- docs: fix HaveEach matcher signature [a2862e4] + +## 1.31.1 + +### Fixes +- Inverted arguments order of FailureMessage of BeComparableToMatcher [e0dd999] +- Update test in case keeping msg is desired [ad1a367] + +### Maintenance +- Show how to import the format sub package [24e958d] +- tidy up go.sum [26661b8] +- bump dependencies [bde8f7a] + +## 1.31.0 + +### Features +- Async assertions include context cancellation cause if present [121c37f] + +### Maintenance +- Bump minimum go version [dee1e3c] +- docs: fix typo in example usage "occured" -> "occurred" [49005fe] +- Bump actions/setup-go from 4 to 5 (#714) [f1c8757] +- Bump github/codeql-action from 2 to 3 (#715) [9836e76] +- Bump github.com/onsi/ginkgo/v2 from 2.13.0 to 2.13.2 (#713) [54726f0] +- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#711) [df97ecc] +- docs: fix `HaveExactElement` typo (#712) [a672c86] + ## 1.30.0 ### Features diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index c271a366a..ffb81b1fe 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.30.0" +const GOMEGA_VERSION = "1.32.0" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index 1188b0bce..cde9e2ec8 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -553,7 +553,12 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch lock.Unlock() } case <-contextDone: - fail("Context was cancelled") + err := context.Cause(assertion.ctx) + if err != nil && err != context.Canceled { + fail(fmt.Sprintf("Context was cancelled (cause: %s)", err)) + } else { + fail("Context was cancelled") + } return false case <-timeout: if assertion.asyncType == AsyncAssertionTypeEventually { diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index 43f994374..8860d677f 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -394,7 +394,7 @@ func ConsistOf(elements ...interface{}) types.GomegaMatcher { } } -// HaveExactElemets succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter. +// HaveExactElements succeeds if actual contains elements that precisely match the elements passed into the matcher. The ordering of the elements does matter. // By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead.
Here are some examples: // // Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar")) diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 8ab4bb919..4e3897858 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -41,9 +41,9 @@ func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, m } func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) { - return cmp.Diff(matcher.Expected, actual, matcher.Options) + return fmt.Sprint("Expected object to be comparable, diff: ", cmp.Diff(actual, matcher.Expected, matcher.Options...)) } func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to equal", matcher.Expected) + return format.Message(actual, "not to be comparable to", matcher.Expected) } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 0ca86a3dc..b2b89b017 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -14,6 +14,7 @@ package expfmt import ( + "bufio" "fmt" "io" "math" @@ -21,8 +22,8 @@ import ( "net/http" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/encoding/protodelim" - "github.com/matttproud/golang_protobuf_extensions/v2/pbutil" "github.com/prometheus/common/model" ) @@ -44,7 +45,7 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { - return FmtUnknown + return fmtUnknown } const textType = "text/plain" @@ -52,28 +53,28 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { - return FmtUnknown + return fmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { - return FmtUnknown + return fmtUnknown } - return FmtProtoDelim + return fmtProtoDelim case textType: if v, ok := params["version"]; ok && v != TextVersion { - return FmtUnknown + return fmtUnknown } - return FmtText + return fmtText } - return FmtUnknown + return fmtUnknown } // NewDecoder returns a new decoder based on the given input format. // If the input format does not imply otherwise, a text format decoder is returned. func NewDecoder(r io.Reader, format Format) Decoder { - switch format { - case FmtProtoDelim: + switch format.FormatType() { + case TypeProtoDelim: return &protoDecoder{r: r} } return &textDecoder{r: r} @@ -86,8 +87,10 @@ type protoDecoder struct { // Decode implements the Decoder interface. 
func (d *protoDecoder) Decode(v *dto.MetricFamily) error { - _, err := pbutil.ReadDelimited(d.r, v) - if err != nil { + opts := protodelim.UnmarshalOptions{ + MaxSize: -1, + } + if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil { return err } if !model.IsValidMetricName(model.LabelValue(v.GetName())) { diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index ca2140600..8fd806184 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,10 +18,12 @@ import ( "io" "net/http" - "github.com/matttproud/golang_protobuf_extensions/v2/pbutil" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "github.com/prometheus/common/model" + dto "github.com/prometheus/client_model/go" ) @@ -60,23 +62,32 @@ func (ec encoderCloser) Close() error { // as the support is still experimental. To include the option to negotiate // FmtOpenMetrics, use NegotiateOpenMetrics. func Negotiate(h http.Header) Format { + escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { + switch Format(escapeParam) { + case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + default: + // If the escaping parameter is unknown, ignore it. + } + } ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return FmtProtoDelim + return fmtProtoDelim + escapingScheme case "text": - return FmtProtoText + return fmtProtoText + escapingScheme case "compact-text": - return FmtProtoCompact + return fmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + return fmtText + escapingScheme } } - return FmtText + return fmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -84,29 +95,40 @@ func Negotiate(h http.Header) Format { // temporary and will disappear once FmtOpenMetrics is fully supported and as // such may be negotiated by the normal Negotiate function. func NegotiateIncludingOpenMetrics(h http.Header) Format { + escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { + switch Format(escapeParam) { + case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + default: + // If the escaping parameter is unknown, ignore it. 
+ } + } ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return FmtProtoDelim + return fmtProtoDelim + escapingScheme case "text": - return FmtProtoText + return fmtProtoText + escapingScheme case "compact-text": - return FmtProtoCompact + return fmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + return fmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { - if ver == OpenMetricsVersion_1_0_0 { - return FmtOpenMetrics_1_0_0 + switch ver { + case OpenMetricsVersion_1_0_0: + return fmtOpenMetrics_1_0_0 + escapingScheme + default: + return fmtOpenMetrics_0_0_1 + escapingScheme } - return FmtOpenMetrics_0_0_1 } } - return FmtText + return fmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. All @@ -115,44 +137,48 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { // for FmtOpenMetrics, but a future (breaking) release will add the Close method // to the Encoder interface directly. The current version of the Encoder // interface is kept for backwards compatibility. +// In cases where the Format does not allow for UTF-8 names, the global +// NameEscapingScheme will be applied. func NewEncoder(w io.Writer, format Format) Encoder { - switch format { - case FmtProtoDelim: + escapingScheme := format.ToEscapingScheme() + + switch format.FormatType() { + case TypeProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) + _, err := protodelim.MarshalTo(w, v) return err }, close: func() error { return nil }, } - case FmtProtoCompact: + case TypeProtoCompact: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) + _, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String()) return err }, close: func() error { return nil }, } - case FmtProtoText: + case TypeProtoText: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, prototext.Format(v)) + _, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme))) return err }, close: func() error { return nil }, } - case FmtText: + case TypeTextPlain: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) + _, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { return nil }, } - case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0: + case TypeOpenMetrics: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToOpenMetrics(w, v) + _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index c4cb20f0d..6fc9555e3 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -14,30 +14,154 @@ // Package expfmt contains tools for reading and writing Prometheus metrics. package expfmt +import ( + "strings" + + "github.com/prometheus/common/model" +) + // Format specifies the HTTP content type of the different wire protocols. 
type Format string -// Constants to assemble the Content-Type values for the different wire protocols. +// Constants to assemble the Content-Type values for the different wire +// protocols. The Content-Type strings here are all for the legacy exposition +// formats, where valid characters for metric names and label names are limited. +// Support for arbitrary UTF-8 characters in those names is already partially +// implemented in this module (see model.ValidationScheme), but to actually use +// it on the wire, new content-type strings will have to be agreed upon and +// added here. const ( TextVersion = "0.0.4" ProtoType = `application/vnd.google.protobuf` ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" - // The Content-Type values for the different wire protocols. - FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` - FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` + // The Content-Type values for the different wire protocols. Note that these + // values are now unexported. If code was relying on comparisons to these + // constants, instead use FormatType(). + fmtUnknown Format = `` + fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + fmtProtoDelim Format = protoFmt + ` encoding=delimited` + fmtProtoText Format = protoFmt + ` encoding=text` + fmtProtoCompact Format = protoFmt + ` encoding=compact-text` + fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( hdrContentType = "Content-Type" hdrAccept = "Accept" ) + +// FormatType is a Go enum representing the overall category for the given +// Format. As the number of Format permutations increases, doing basic string +// comparisons is not feasible, so this enum captures the most useful +// high-level attribute of the Format string. +type FormatType int + +const ( + TypeUnknown = iota + TypeProtoCompact + TypeProtoDelim + TypeProtoText + TypeTextPlain + TypeOpenMetrics +) + +// NewFormat generates a new Format from the type provided. Mostly used for +// tests, most Formats should be generated as part of content negotiation in +// encode.go. +func NewFormat(t FormatType) Format { + switch t { + case TypeProtoCompact: + return fmtProtoCompact + case TypeProtoDelim: + return fmtProtoDelim + case TypeProtoText: + return fmtProtoText + case TypeTextPlain: + return fmtText + case TypeOpenMetrics: + return fmtOpenMetrics_1_0_0 + default: + return fmtUnknown + } +} + +// FormatType deduces an overall FormatType for the given format.
+func (f Format) FormatType() FormatType { + toks := strings.Split(string(f), ";") + if len(toks) < 2 { + return TypeUnknown + } + + params := make(map[string]string) + for i, t := range toks { + if i == 0 { + continue + } + args := strings.Split(t, "=") + if len(args) != 2 { + continue + } + params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1]) + } + + switch strings.TrimSpace(toks[0]) { + case ProtoType: + if params["proto"] != ProtoProtocol { + return TypeUnknown + } + switch params["encoding"] { + case "delimited": + return TypeProtoDelim + case "text": + return TypeProtoText + case "compact-text": + return TypeProtoCompact + default: + return TypeUnknown + } + case OpenMetricsType: + if params["charset"] != "utf-8" { + return TypeUnknown + } + return TypeOpenMetrics + case "text/plain": + v, ok := params["version"] + if !ok { + return TypeTextPlain + } + if v == TextVersion { + return TypeTextPlain + } + return TypeUnknown + default: + return TypeUnknown + } +} + +// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the +// Format contains an escaping=allow-utf-8 term, it will select NoEscaping. If a valid +// "escaping" term exists, that will be used. Otherwise, the global default will +// be returned. +func (format Format) ToEscapingScheme() model.EscapingScheme { + for _, p := range strings.Split(string(format), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + continue + } + key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1]) + if key == model.EscapingKey { + scheme, err := model.ToEscapingScheme(value) + if err != nil { + return model.NameEscapingScheme + } + return scheme + } + } + return model.NameEscapingScheme +} diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 21cdddcf0..5622578ed 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -35,6 +35,18 @@ import ( // sanity checks. If the input contains duplicate metrics or invalid metric or // label names, the conversion will result in invalid text format output. // +// If metric names conform to the legacy validation pattern, they will be placed +// outside the brackets in the traditional way, like `foo{}`. If the metric name +// fails the legacy validation check, it will be placed quoted inside the +// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and +// no error will be thrown in this case. +// +// Similar to metric names, if label names conform to the legacy validation +// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label +// name fails the legacy validation check, it will be quoted: +// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and +// no error will be thrown in this case. +// // This function fulfills the type 'expfmt.encoder'. // // Note that OpenMetrics requires a final `# EOF` line.
Since this function acts @@ -98,7 +110,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = w.WriteString(shortName) + n, err = writeName(w, shortName) written += n if err != nil { return @@ -124,7 +136,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = w.WriteString(shortName) + n, err = writeName(w, shortName) written += n if err != nil { return @@ -303,21 +315,9 @@ func writeOpenMetricsSample( floatValue float64, intValue uint64, useIntValue bool, exemplar *dto.Exemplar, ) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeOpenMetricsLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, + written := 0 + n, err := writeOpenMetricsNameAndLabelPairs( + w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, ) written += n if err != nil { @@ -365,27 +365,58 @@ func writeOpenMetricsSample( return written, nil } -// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float -// in OpenMetrics style. -func writeOpenMetricsLabelPairs( +// writeOpenMetricsNameAndLabelPairs works like writeOpenMetricsSample but +// formats the float in OpenMetrics style. +func writeOpenMetricsNameAndLabelPairs( w enhancedWriter, + name string, in []*dto.LabelPair, additionalLabelName string, additionalLabelValue float64, ) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } var ( - written int - separator byte = '{' + written int + separator byte = '{' + metricInsideBraces = false ) + + if name != "" { + // If the name does not pass the legacy validity check, we must put the + // metric name inside the braces, quoted. + if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + metricInsideBraces = true + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + separator = ',' + } + + n, err := writeName(w, name) + written += n + if err != nil { + return written, err + } + } + + if len(in) == 0 && additionalLabelName == "" { + if metricInsideBraces { + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + } + return written, nil + } + for _, lp := range in { err := w.WriteByte(separator) written++ if err != nil { return written, err } - n, err := w.WriteString(lp.GetName()) + n, err := writeName(w, lp.GetName()) written += n if err != nil { return written, err @@ -451,7 +482,7 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) + n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0) written += n if err != nil { return written, err diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 2946b8f1a..f9b8265a9 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -62,6 +62,18 @@ var ( // contains duplicate metrics or invalid metric or label names, the conversion // will result in invalid text format output. 
// +// If metric names conform to the legacy validation pattern, they will be placed +// outside the brackets in the traditional way, like `foo{}`. If the metric name +// fails the legacy validation check, it will be placed quoted inside the +// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and +// no error will be thrown in this case. +// +// Similar to metric names, if label names conform to the legacy validation +// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label +// name fails the legacy validation check, it will be quoted: +// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and +// no error will be thrown in this case. +// // This method fulfills the type 'prometheus.encoder'. func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { // Fail-fast checks. @@ -98,7 +110,7 @@ if err != nil { return } - n, err = w.WriteString(name) + n, err = writeName(w, name) written += n if err != nil { return @@ -124,7 +136,7 @@ if err != nil { return } - n, err = w.WriteString(name) + n, err = writeName(w, name) written += n if err != nil { return @@ -280,21 +292,9 @@ func writeSample( additionalLabelName string, additionalLabelValue float64, value float64, ) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, + written := 0 + n, err := writeNameAndLabelPairs( + w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, ) written += n if err != nil { @@ -330,32 +330,64 @@ return written, nil } -// writeLabelPairs converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'w'. An empty slice in combination with an empty -// string 'additionalLabelName' results in nothing being written. Otherwise, the -// label pairs are written, escaped as required by the text format, and enclosed -// in '{...}'. The function returns the number of bytes written and any error -// encountered. -func writeLabelPairs( +// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the +// explicitly given metric name and additional label pair into text formatted as +// required by the text format and writes it to 'w'. An empty slice in +// combination with an empty string 'additionalLabelName' results in nothing +// being written. Otherwise, the label pairs are written, escaped as required by +// the text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. If the metric name is not +// legacy-valid, it will be put inside the brackets as well. Legacy-invalid +// label names will also be quoted.
+func writeNameAndLabelPairs( w enhancedWriter, + name string, in []*dto.LabelPair, additionalLabelName string, additionalLabelValue float64, ) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } var ( - written int - separator byte = '{' + written int + separator byte = '{' + metricInsideBraces = false ) + + if name != "" { + // If the name does not pass the legacy validity check, we must put the + // metric name inside the braces. + if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + metricInsideBraces = true + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + separator = ',' + } + n, err := writeName(w, name) + written += n + if err != nil { + return written, err + } + } + + if len(in) == 0 && additionalLabelName == "" { + if metricInsideBraces { + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + } + return written, nil + } + for _, lp := range in { err := w.WriteByte(separator) written++ if err != nil { return written, err } - n, err := w.WriteString(lp.GetName()) + n, err := writeName(w, lp.GetName()) written += n if err != nil { return written, err @@ -462,3 +494,27 @@ func writeInt(w enhancedWriter, i int64) (int, error) { numBufPool.Put(bp) return written, err } + +// writeName writes a string as-is if it complies with the legacy naming +// scheme, or escapes it in double quotes if not. +func writeName(w enhancedWriter, name string) (int, error) { + if model.IsValidLegacyMetricName(model.LabelValue(name)) { + return w.WriteString(name) + } + var written int + var err error + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + var n int + n, err = writeEscapedString(w, name, true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + return written, err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 35db1cc9d..26490211a 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -16,6 +16,7 @@ package expfmt import ( "bufio" "bytes" + "errors" "fmt" "io" "math" @@ -24,8 +25,9 @@ import ( dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" "google.golang.org/protobuf/proto" + + "github.com/prometheus/common/model" ) // A stateFn is a function that represents a state in a state machine. By @@ -112,7 +114,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF // stream. Turn this error into something nicer and more // meaningful. (io.EOF is often used as a signal for the legitimate end // of an input stream.) - if p.err == io.EOF { + if p.err != nil && errors.Is(p.err, io.EOF) { p.parseError("unexpected end of input stream") } return p.metricFamiliesByName, p.err @@ -146,7 +148,7 @@ func (p *TextParser) startOfLine() stateFn { // which is not an error but the signal that we are done. // Any other error that happens to align with the start of // a line is still an error. 
- if p.err == io.EOF { + if errors.Is(p.err, io.EOF) { p.err = nil } return nil diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 35e739c7a..178fdbaf6 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -90,13 +90,13 @@ func (a *Alert) Validate() error { return fmt.Errorf("start time must be before end time") } if err := a.Labels.Validate(); err != nil { - return fmt.Errorf("invalid label set: %s", err) + return fmt.Errorf("invalid label set: %w", err) } if len(a.Labels) == 0 { return fmt.Errorf("at least one label pair required") } if err := a.Annotations.Validate(); err != nil { - return fmt.Errorf("invalid annotations: %s", err) + return fmt.Errorf("invalid annotations: %w", err) } return nil } diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index ef8956335..3317ce22f 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,17 +97,25 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid is true iff the label name matches the pattern of LabelNameRE. This -// method, however, does not use LabelNameRE for the check but a much faster -// hardcoded implementation. +// IsValid returns true iff name matches the pattern of LabelNameRE for legacy +// names, and iff it's valid UTF-8 if NameValidationScheme is set to +// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the +// check but a much faster hardcoded implementation. func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false + switch NameValidationScheme { + case LegacyValidation: + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } } + case UTF8Validation: + return utf8.ValidString(string(ln)) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } return true } @@ -164,7 +172,7 @@ func (l LabelNames) String() string { // A LabelValue is an associated value for a LabelName. type LabelValue string -// IsValid returns true iff the string is a valid UTF8. +// IsValid returns true iff the string is a valid UTF-8. func (lv LabelValue) IsValid() bool { return utf8.ValidString(string(lv)) } diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go new file mode 100644 index 000000000..447ab8ad6 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metadata.go @@ -0,0 +1,28 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +// MetricType represents metric type values. +type MetricType string + +const ( + MetricTypeCounter = MetricType("counter") + MetricTypeGauge = MetricType("gauge") + MetricTypeHistogram = MetricType("histogram") + MetricTypeGaugeHistogram = MetricType("gaugehistogram") + MetricTypeSummary = MetricType("summary") + MetricTypeInfo = MetricType("info") + MetricTypeStateset = MetricType("stateset") + MetricTypeUnknown = MetricType("unknown") +) diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 00804b7fe..0bd29b3a3 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -18,15 +18,84 @@ import ( "regexp" "sort" "strings" + "unicode/utf8" + + dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" ) var ( - // MetricNameRE is a regular expression matching valid metric - // names. Note that the IsValidMetricName function performs the same - // check but faster than a match with this regular expression. - MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) + // NameValidationScheme determines the method of name validation to be used by + // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode + // in isolation from other components that don't support UTF-8 may result in + // bugs or other undefined behavior. This value is intended to be set by + // UTF-8-aware binaries as part of their startup. To avoid the need for locking, + // this value should be set once, ideally in an init(), before multiple + // goroutines are started. + NameValidationScheme = LegacyValidation + + // NameEscapingScheme defines the default way that names will be + // escaped when presented to systems that do not support UTF-8 names. If the + // Content-Type "escaping" term is specified, that will override this value. + NameEscapingScheme = ValueEncodingEscaping +) + +// ValidationScheme is a Go enum for determining how metric and label names will +// be validated by this library. +type ValidationScheme int + +const ( + // LegacyValidation is a setting that requires that metric and label names + // conform to the original Prometheus character requirements described by + // MetricNameRE and LabelNameRE. + LegacyValidation ValidationScheme = iota + + // UTF8Validation only requires that metric and label names be valid UTF-8 + // strings. + UTF8Validation +) + +type EscapingScheme int + +const ( + // NoEscaping indicates that a name will not be escaped. Unescaped names that + // do not conform to the legacy validity check will use a new exposition + // format syntax that will be officially standardized in future versions. + NoEscaping EscapingScheme = iota + + // UnderscoreEscaping replaces all legacy-invalid characters with underscores. + UnderscoreEscaping + + // DotsEscaping is similar to UnderscoreEscaping, except that dots are + // converted to `_dot_` and pre-existing underscores are converted to `__`. + DotsEscaping + + // ValueEncodingEscaping prepends the name with `U__` and replaces all invalid + // characters with the unicode value, surrounded by underscores. Single + // underscores are replaced with double underscores. + ValueEncodingEscaping +) + +const ( + // EscapingKey is the key in an Accept or Content-Type header that defines how + // metric and label names that do not conform to the legacy character + // requirements should be escaped when being scraped by a legacy prometheus + // system.
If a system does not explicitly pass an escaping parameter in the + // Accept header, the default NameEscapingScheme will be used. + EscapingKey = "escaping" + + // Possible values for Escaping Key: + AllowUTF8 = "allow-utf-8" // No escaping required. + EscapeUnderscores = "underscores" + EscapeDots = "dots" + EscapeValues = "values" ) +// MetricNameRE is a regular expression matching valid metric +// names. Note that the IsValidMetricName function performs the same +// check but faster than a match with this regular expression. +var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) + // A Metric is similar to a LabelSet, but the key difference is that a Metric is // a singleton and refers to one and only one stream of samples. type Metric LabelSet @@ -86,17 +155,302 @@ func (m Metric) FastFingerprint() Fingerprint { return LabelSet(m).FastFingerprint() } -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE +// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is +// selected. +func IsValidMetricName(n LabelValue) bool { + switch NameValidationScheme { + case LegacyValidation: + return IsValidLegacyMetricName(n) + case UTF8Validation: + if len(n) == 0 { + return false + } + return utf8.ValidString(string(n)) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) + } +} + +// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the +// legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n LabelValue) bool { if len(n) == 0 { return false } for i, b := range n { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + if !isValidLegacyRune(b, i) { return false } } return true } + +// EscapeMetricFamily escapes the given metric names and labels with the given +// escaping scheme. Returns a new object that uses the same pointers to fields +// when possible and creates new escaped versions so as not to mutate the +// input. +func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily { + if v == nil { + return nil + } + + if scheme == NoEscaping { + return v + } + + out := &dto.MetricFamily{ + Help: v.Help, + Type: v.Type, + } + + // If the name is nil, copy as-is, don't try to escape. 
+ if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { + out.Name = v.Name + } else { + out.Name = proto.String(EscapeName(v.GetName(), scheme)) + } + for _, m := range v.Metric { + if !metricNeedsEscaping(m) { + out.Metric = append(out.Metric, m) + continue + } + + escaped := &dto.Metric{ + Gauge: m.Gauge, + Counter: m.Counter, + Summary: m.Summary, + Untyped: m.Untyped, + Histogram: m.Histogram, + TimestampMs: m.TimestampMs, + } + + for _, l := range m.Label { + if l.GetName() == MetricNameLabel { + if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { + escaped.Label = append(escaped.Label, l) + continue + } + escaped.Label = append(escaped.Label, &dto.LabelPair{ + Name: proto.String(MetricNameLabel), + Value: proto.String(EscapeName(l.GetValue(), scheme)), + }) + continue + } + if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { + escaped.Label = append(escaped.Label, l) + continue + } + escaped.Label = append(escaped.Label, &dto.LabelPair{ + Name: proto.String(EscapeName(l.GetName(), scheme)), + Value: l.Value, + }) + } + out.Metric = append(out.Metric, escaped) + } + return out +} + +func metricNeedsEscaping(m *dto.Metric) bool { + for _, l := range m.Label { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { + return true + } + if !IsValidLegacyMetricName(LabelValue(l.GetName())) { + return true + } + } + return false +} + +const ( + lowerhex = "0123456789abcdef" +) + +// EscapeName escapes the incoming name according to the provided escaping +// scheme. Depending on the rules of escaping, this may cause no change in the +// string that is returned. (Especially NoEscaping, which by definition is a +// noop). This function does not do any validation of the name. +func EscapeName(name string, scheme EscapingScheme) string { + if len(name) == 0 { + return name + } + var escaped strings.Builder + switch scheme { + case NoEscaping: + return name + case UnderscoreEscaping: + if IsValidLegacyMetricName(LabelValue(name)) { + return name + } + for i, b := range name { + if isValidLegacyRune(b, i) { + escaped.WriteRune(b) + } else { + escaped.WriteRune('_') + } + } + return escaped.String() + case DotsEscaping: + // Do not early return for legacy valid names, we still escape underscores. + for i, b := range name { + if b == '_' { + escaped.WriteString("__") + } else if b == '.' { + escaped.WriteString("_dot_") + } else if isValidLegacyRune(b, i) { + escaped.WriteRune(b) + } else { + escaped.WriteRune('_') + } + } + return escaped.String() + case ValueEncodingEscaping: + if IsValidLegacyMetricName(LabelValue(name)) { + return name + } + escaped.WriteString("U__") + for i, b := range name { + if isValidLegacyRune(b, i) { + escaped.WriteRune(b) + } else if !utf8.ValidRune(b) { + escaped.WriteString("_FFFD_") + } else if b < 0x100 { + escaped.WriteRune('_') + for s := 4; s >= 0; s -= 4 { + escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) + } + escaped.WriteRune('_') + } else if b < 0x10000 { + escaped.WriteRune('_') + for s := 12; s >= 0; s -= 4 { + escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) + } + escaped.WriteRune('_') + } + } + return escaped.String() + default: + panic(fmt.Sprintf("invalid escaping scheme %d", scheme)) + } +} + +// lower function taken from strconv.atoi +func lower(c byte) byte { + return c | ('x' - 'X') +} + +// UnescapeName unescapes the incoming name according to the provided escaping +// scheme if possible. Some schemes are partially or totally non-roundtripable. 
+// If any error is encountered, returns the original input. +func UnescapeName(name string, scheme EscapingScheme) string { + if len(name) == 0 { + return name + } + switch scheme { + case NoEscaping: + return name + case UnderscoreEscaping: + // It is not possible to unescape from underscore replacement. + return name + case DotsEscaping: + name = strings.ReplaceAll(name, "_dot_", ".") + name = strings.ReplaceAll(name, "__", "_") + return name + case ValueEncodingEscaping: + escapedName, found := strings.CutPrefix(name, "U__") + if !found { + return name + } + + var unescaped strings.Builder + TOP: + for i := 0; i < len(escapedName); i++ { + // All non-underscores are treated normally. + if escapedName[i] != '_' { + unescaped.WriteByte(escapedName[i]) + continue + } + i++ + if i >= len(escapedName) { + return name + } + // A double underscore is a single underscore. + if escapedName[i] == '_' { + unescaped.WriteByte('_') + continue + } + // We think we are in a UTF-8 code, process it. + var utf8Val uint + for j := 0; i < len(escapedName); j++ { + // This is too many characters for a utf8 value. + if j > 4 { + return name + } + // Found a closing underscore, convert to a rune, check validity, and append. + if escapedName[i] == '_' { + utf8Rune := rune(utf8Val) + if !utf8.ValidRune(utf8Rune) { + return name + } + unescaped.WriteRune(utf8Rune) + continue TOP + } + r := lower(escapedName[i]) + utf8Val *= 16 + if r >= '0' && r <= '9' { + utf8Val += uint(r) - '0' + } else if r >= 'a' && r <= 'f' { + utf8Val += uint(r) - 'a' + 10 + } else { + return name + } + i++ + } + // Didn't find closing underscore, invalid. + return name + } + return unescaped.String() + default: + panic(fmt.Sprintf("invalid escaping scheme %d", scheme)) + } +} + +func isValidLegacyRune(b rune, i int) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0) +} + +func (e EscapingScheme) String() string { + switch e { + case NoEscaping: + return AllowUTF8 + case UnderscoreEscaping: + return EscapeUnderscores + case DotsEscaping: + return EscapeDots + case ValueEncodingEscaping: + return EscapeValues + default: + panic(fmt.Sprintf("unknown format scheme %d", e)) + } +} + +func ToEscapingScheme(s string) (EscapingScheme, error) { + if s == "" { + return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme") + } + switch s { + case AllowUTF8: + return NoEscaping, nil + case EscapeUnderscores: + return UnderscoreEscaping, nil + case EscapeDots: + return DotsEscaping, nil + case EscapeValues: + return ValueEncodingEscaping, nil + default: + return NoEscaping, fmt.Errorf("unknown format scheme " + s) + } +} diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go index 8762b13c6..dc8a0026c 100644 --- a/vendor/github.com/prometheus/common/model/signature.go +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -22,10 +22,8 @@ import ( // when calculating their combined hash value (aka signature aka fingerprint). const SeparatorByte byte = 255 -var ( - // cache the signature of an empty label set. - emptyLabelSignature = hashNew() -) +// cache the signature of an empty label set. +var emptyLabelSignature = hashNew() // LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a // given label set.
(Collisions are possible but unlikely if the number of label diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index bb99889d2..910b0b71f 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -81,7 +81,7 @@ func (s *Silence) Validate() error { } for _, m := range s.Matchers { if err := m.Validate(); err != nil { - return fmt.Errorf("invalid matcher: %s", err) + return fmt.Errorf("invalid matcher: %w", err) } } if s.StartsAt.IsZero() { diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index 9eb440413..8050637d8 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -21,14 +21,12 @@ import ( "strings" ) -var ( - // ZeroSample is the pseudo zero-value of Sample used to signal a - // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, - // and metric nil. Note that the natural zero value of Sample has a timestamp - // of 0, which is possible to appear in a real Sample and thus not suitable - // to signal a non-existing Sample. - ZeroSample = Sample{Timestamp: Earliest} -) +// ZeroSample is the pseudo zero-value of Sample used to signal a +// non-existing sample. It is a Sample with timestamp Earliest, value 0.0, +// and metric nil. Note that the natural zero value of Sample has a timestamp +// of 0, which is possible to appear in a real Sample and thus not suitable +// to signal a non-existing Sample. +var ZeroSample = Sample{Timestamp: Earliest} // Sample is a sample pair associated with a metric. A single sample must either // define Value or Histogram but not both. Histogram == nil implies the Value @@ -274,7 +272,7 @@ func (s *Scalar) UnmarshalJSON(b []byte) error { value, err := strconv.ParseFloat(f, 64) if err != nil { - return fmt.Errorf("error parsing sample value: %s", err) + return fmt.Errorf("error parsing sample value: %w", err) } s.Value = SampleValue(value) return nil diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go index 0f615a705..ae35cc2ab 100644 --- a/vendor/github.com/prometheus/common/model/value_float.go +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -20,14 +20,12 @@ import ( "strconv" ) -var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} -) +// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a +// non-existing sample pair. It is a SamplePair with timestamp Earliest and +// value 0.0. Note that the natural zero value of SamplePair has a timestamp +// of 0, which is possible to appear in a real SamplePair and thus not +// suitable to signal a non-existing SamplePair. +var ZeroSamplePair = SamplePair{Timestamp: Earliest} // A SampleValue is a representation of a value for a given sample at a given // time. 
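As context for the prometheus/common/model hunks above: EscapeName maps UTF-8 metric names onto legacy-compatible names, and UnescapeName reverses that mapping where the scheme allows it. The following is a minimal sketch of how the API behaves, assuming the vendored prometheus/common version from this diff and a hypothetical main package used purely for illustration:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// "http.requests.total" is not a valid legacy metric name (it contains dots).
	name := "http.requests.total"

	// UnderscoreEscaping replaces each invalid rune with '_' and is lossy:
	// UnescapeName cannot tell which underscores were originally dots.
	fmt.Println(model.EscapeName(name, model.UnderscoreEscaping)) // http_requests_total

	// ValueEncodingEscaping prefixes "U__" and hex-encodes invalid runes,
	// so UnescapeName can recover the original name.
	enc := model.EscapeName(name, model.ValueEncodingEscaping)
	fmt.Println(enc)                                                  // U__http_2e_requests_2e_total
	fmt.Println(model.UnescapeName(enc, model.ValueEncodingEscaping)) // http.requests.total
}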
diff --git a/vendor/github.com/regclient/regclient/.gitignore b/vendor/github.com/regclient/regclient/.gitignore index c96edf5f7..24207e4ef 100644 --- a/vendor/github.com/regclient/regclient/.gitignore +++ b/vendor/github.com/regclient/regclient/.gitignore @@ -1,6 +1,5 @@ -hack -/regctl artifacts/ bin/ output/ vendor/ +.regctl_conf_ci.json diff --git a/vendor/github.com/regclient/regclient/.version-bump.lock b/vendor/github.com/regclient/regclient/.version-bump.lock index 1ed4e4ea3..cb8d625d1 100644 --- a/vendor/github.com/regclient/regclient/.version-bump.lock +++ b/vendor/github.com/regclient/regclient/.version-bump.lock @@ -1,34 +1,104 @@ -{"name":"gha-uses","key":"actions/checkout","version":"v3"} -{"name":"gha-uses","key":"actions/setup-go","version":"v3"} -{"name":"gha-uses","key":"actions/stale","version":"v7"} -{"name":"gha-uses","key":"actions/upload-artifact","version":"v3"} -{"name":"gha-uses","key":"docker/build-push-action","version":"v4"} -{"name":"gha-uses","key":"docker/login-action","version":"v2"} -{"name":"gha-uses","key":"docker/setup-buildx-action","version":"v2"} -{"name":"gha-uses","key":"softprops/action-gh-release","version":"v1"} -{"name":"gha-uses-semver","key":"anchore/sbom-action","version":"v0.14.1"} -{"name":"gha-uses-semver","key":"sigstore/cosign-installer","version":"v3.0.2"} -{"name":"gha-uses-vx","key":"actions/checkout","version":"v3"} -{"name":"gha-uses-vx","key":"actions/setup-go","version":"v4"} -{"name":"gha-uses-vx","key":"actions/stale","version":"v8"} -{"name":"gha-uses-vx","key":"actions/upload-artifact","version":"v3"} -{"name":"gha-uses-vx","key":"docker/build-push-action","version":"v4"} -{"name":"gha-uses-vx","key":"docker/login-action","version":"v2"} -{"name":"gha-uses-vx","key":"docker/setup-buildx-action","version":"v2"} -{"name":"gha-uses-vx","key":"softprops/action-gh-release","version":"v1"} +{"name":"gha-uses-semver","key":"actions/checkout","version":"v4.1.2"} +{"name":"gha-uses-semver","key":"actions/setup-go","version":"v5.0.0"} +{"name":"gha-uses-semver","key":"actions/stale","version":"v9.0.0"} +{"name":"gha-uses-semver","key":"actions/upload-artifact","version":"v4.3.1"} +{"name":"gha-uses-semver","key":"anchore/sbom-action","version":"v0.15.9"} +{"name":"gha-uses-semver","key":"docker/build-push-action","version":"v5.3.0"} +{"name":"gha-uses-semver","key":"docker/login-action","version":"v3.1.0"} +{"name":"gha-uses-semver","key":"docker/setup-buildx-action","version":"v3.2.0"} +{"name":"gha-uses-semver","key":"github/codeql-action","version":"v3.24.9"} +{"name":"gha-uses-semver","key":"ossf/scorecard-action","version":"v2.3.1"} +{"name":"gha-uses-semver","key":"sigstore/cosign-installer","version":"v3.4.0"} +{"name":"gha-uses-semver","key":"softprops/action-gh-release","version":"v2.0.4"} {"name":"git-commit","key":"https://github.com/GoogleCloudPlatform/docker-credential-gcr.git:master","version":"62afb2723512fd6572c6300024e0c94f734b0ae3"} -{"name":"git-commit","key":"https://github.com/awslabs/amazon-ecr-credential-helper.git:main","version":"077b4a917a90e9f927e4207f8b6d22f9a057b7cb"} -{"name":"git-commit","key":"https://github.com/grafi-tt/lunajson.git:master","version":"e6063d37de97dfdfe2749e24de949472bfad0945"} +{"name":"git-commit","key":"https://github.com/awslabs/amazon-ecr-credential-helper.git:main","version":"1fd604ae58de3526cbf497b11f8de4083aa3b14f"} +{"name":"git-commit","key":"https://github.com/grafi-tt/lunajson.git:master","version":"3d10600874527d71519b33ecbb314eb93ccd1df6"} 
{"name":"git-commit","key":"https://github.com/kikito/semver.lua.git:master","version":"af495adc857d51fd1507a112be18523828a1da0d"} -{"name":"git-tag-semver","key":"github.com/dominikh/go-tools","version":"v0.4.3"} -{"name":"git-tag-semver","key":"github.com/sigstore/cosign","version":"v2.0.1"} -{"name":"registry-digest-arg","key":"alpine:3","version":"sha256:124c7d2707904eea7431fffe91522a01e5a861a624ee31d03372cc1d138a3126"} -{"name":"registry-digest-arg","key":"golang:1.19-alpine","version":"sha256:2381c1e5f8350a901597d633b2e517775eeac7a6682be39225a93b22cfd0f8bb"} -{"name":"registry-digest-arg","key":"golang:1.20-alpine","version":"sha256:08e9c086194875334d606765bd60aa064abd3c215abfbcf5737619110d48d114"} -{"name":"registry-digest-match","key":"anchore/syft:v0.72.0","version":"sha256:f4735b4c31d4b9ad979eb650712cdcdb1156afe80c7d63653ac87439b379e468"} -{"name":"registry-digest-match","key":"anchore/syft:v0.73.0","version":"sha256:6a13beb5cc8eedb2a42a5b91a69a36105880a2a1fdba75cc8c4dbcd61a0eed2b"} -{"name":"registry-digest-match","key":"anchore/syft:v0.77.0","version":"sha256:0135d844712731b86fab20ea584e594ad25b9f5a7fd262b5feb490e865edef89"} -{"name":"registry-golang-latest","key":"golang-latest","version":"1.20"} -{"name":"registry-golang-matrix","key":"golang-matrix","version":"[\"1.18\", \"1.19\", \"1.20\"]"} -{"name":"registry-tag-arg-semver","key":"anchore/syft","version":"v0.77.0"} -{"name":"registry-tag-match-semver","key":"anchore/syft","version":"v0.77.0"} +{"name":"git-tag-semver","key":"github.com/GoogleCloudPlatform/docker-credential-gcr","version":"v2.1.22"} +{"name":"git-tag-semver","key":"github.com/dominikh/go-tools","version":"v0.4.7"} +{"name":"git-tag-semver","key":"github.com/google/osv-scanner","version":"v1.7.1"} +{"name":"git-tag-semver","key":"github.com/icholy/gomajor","version":"v0.10.1"} +{"name":"git-tag-semver","key":"github.com/securego/gosec","version":"v2.19.0"} +{"name":"git-tag-semver","key":"github.com/sigstore/cosign","version":"v2.2.3"} +{"name":"git-tag-semver","key":"go.googlesource.com/vuln","version":"v1.0.4"} +{"name":"github-commit-match","key":"actions/checkout:v4.1.1","version":"b4ffde65f46336ab88eb53be808477a3936bae11"} +{"name":"github-commit-match","key":"actions/checkout:v4.1.2","version":"9bb56186c3b09b4f86b1c65136769dd318469633"} +{"name":"github-commit-match","key":"actions/setup-go:v4.1.0","version":"93397bea11091df50f3d7e59dc26a7711a8bcfbe"} +{"name":"github-commit-match","key":"actions/setup-go:v5.0.0","version":"0c52d547c9bc32b1aa3301fd7a9cb496313a4491"} +{"name":"github-commit-match","key":"actions/stale:v8.0.0","version":"1160a2240286f5da8ec72b1c0816ce2481aabf84"} +{"name":"github-commit-match","key":"actions/stale:v9.0.0","version":"28ca1036281a5e5922ead5184a1bbf96e5fc984e"} +{"name":"github-commit-match","key":"actions/upload-artifact:v3.1.3","version":"a8a3f3ad30e3422c9c7b888a15615d19a852ae32"} +{"name":"github-commit-match","key":"actions/upload-artifact:v4.0.0","version":"c7d193f32edcb7bfad88892161225aeda64e9392"} +{"name":"github-commit-match","key":"actions/upload-artifact:v4.1.0","version":"1eb3cb2b3e0f29609092a73eb033bb759a334595"} +{"name":"github-commit-match","key":"actions/upload-artifact:v4.3.0","version":"26f96dfa697d77e81fd5907df203aa23a56210a8"} +{"name":"github-commit-match","key":"actions/upload-artifact:v4.3.1","version":"5d5d22a31266ced268874388b861e4b58bb5c2f3"} +{"name":"github-commit-match","key":"anchore/sbom-action:v0.14.3","version":"78fc58e266e87a38d4194b2137a3d4e9bcaf7ca1"} 
+{"name":"github-commit-match","key":"anchore/sbom-action:v0.15.0","version":"fd74a6fb98a204a1ad35bbfae0122c1a302ff88b"} +{"name":"github-commit-match","key":"anchore/sbom-action:v0.15.1","version":"5ecf649a417b8ae17dc8383dc32d46c03f2312df"} +{"name":"github-commit-match","key":"anchore/sbom-action:v0.15.3","version":"c7f031d9249a826a082ea14c79d3b686a51d485a"} +{"name":"github-commit-match","key":"anchore/sbom-action:v0.15.5","version":"24b0d5238516480139aa8bc6f92eeb7b54a9eb0a"} +{"name":"github-commit-match","key":"anchore/sbom-action:v0.15.8","version":"b6a39da80722a2cb0ef5d197531764a89b5d48c3"} +{"name":"github-commit-match","key":"anchore/sbom-action:v0.15.9","version":"9fece9e20048ca9590af301449208b2b8861333b"} +{"name":"github-commit-match","key":"docker/build-push-action:v5.1.0","version":"4a13e500e55cf31b7a5d59a38ab2040ab0f42f56"} +{"name":"github-commit-match","key":"docker/build-push-action:v5.2.0","version":"af5a7ed5ba88268d5278f7203fb52cd833f66d6e"} +{"name":"github-commit-match","key":"docker/build-push-action:v5.3.0","version":"2cdde995de11925a030ce8070c3d77a52ffcf1c0"} +{"name":"github-commit-match","key":"docker/login-action:v3.0.0","version":"343f7c4344506bcbf9b4de18042ae17996df046d"} +{"name":"github-commit-match","key":"docker/login-action:v3.1.0","version":"e92390c5fb421da1463c202d546fed0ec5c39f20"} +{"name":"github-commit-match","key":"docker/setup-buildx-action:v3.0.0","version":"f95db51fddba0c2d1ec667646a06c2ce06100226"} +{"name":"github-commit-match","key":"docker/setup-buildx-action:v3.1.0","version":"0d103c3126aa41d772a8362f6aa67afac040f80c"} +{"name":"github-commit-match","key":"docker/setup-buildx-action:v3.2.0","version":"2b51285047da1547ffb1b2203d8be4c0af6b1f20"} +{"name":"github-commit-match","key":"github/codeql-action:v2.22.7","version":"66b90a5db151a8042fa97405c6cf843bbe433f7b"} +{"name":"github-commit-match","key":"github/codeql-action:v2.22.8","version":"407ffafae6a767df3e0230c3df91b6443ae8df75"} +{"name":"github-commit-match","key":"github/codeql-action:v2.22.9","version":"c0d1daa7f7e14667747d73a7dbbe8c074bc8bfe2"} +{"name":"github-commit-match","key":"github/codeql-action:v3.22.11","version":"b374143c1149a9115d881581d29b8390bbcbb59c"} +{"name":"github-commit-match","key":"github/codeql-action:v3.22.12","version":"012739e5082ff0c22ca6d6ab32e07c36df03c4a4"} +{"name":"github-commit-match","key":"github/codeql-action:v3.23.0","version":"e5f05b81d5b6ff8cfa111c80c22c5fd02a384118"} +{"name":"github-commit-match","key":"github/codeql-action:v3.23.1","version":"0b21cf2492b6b02c465a3e5d7c473717ad7721ba"} +{"name":"github-commit-match","key":"github/codeql-action:v3.23.2","version":"b7bf0a3ed3ecfa44160715d7c442788f65f0f923"} +{"name":"github-commit-match","key":"github/codeql-action:v3.24.0","version":"e8893c57a1f3a2b659b6b55564fdfdbbd2982911"} +{"name":"github-commit-match","key":"github/codeql-action:v3.24.3","version":"379614612a29c9e28f31f39a59013eb8012a51f0"} +{"name":"github-commit-match","key":"github/codeql-action:v3.24.5","version":"47b3d888fe66b639e431abf22ebca059152f1eea"} +{"name":"github-commit-match","key":"github/codeql-action:v3.24.6","version":"8a470fddafa5cbb6266ee11b37ef4d8aae19c571"} +{"name":"github-commit-match","key":"github/codeql-action:v3.24.8","version":"05963f47d870e2cb19a537396c1f668a348c7d8f"} +{"name":"github-commit-match","key":"github/codeql-action:v3.24.9","version":"1b1aada464948af03b950897e5eb522f92603cc2"} +{"name":"github-commit-match","key":"ossf/scorecard-action:v2.3.1","version":"0864cf19026789058feabb7e87baa5f140aac736"} 
+{"name":"github-commit-match","key":"regclient/actions:main","version":"d8097ee5dd5cdf150516315919b58509fc7f4cfa"} +{"name":"github-commit-match","key":"sigstore/cosign-installer:v3.2.0","version":"1fc5bd396d372bee37d608f955b336615edf79c8"} +{"name":"github-commit-match","key":"sigstore/cosign-installer:v3.3.0","version":"9614fae9e5c5eddabb09f90a270fcb487c9f7149"} +{"name":"github-commit-match","key":"sigstore/cosign-installer:v3.4.0","version":"e1523de7571e31dbe865fd2e80c5c7c23ae71eb4"} +{"name":"github-commit-match","key":"softprops/action-gh-release:v0.1.15","version":"de2c0eb89ae2a093876385947365aca7b0e5f844"} +{"name":"github-commit-match","key":"softprops/action-gh-release:v2.0.3","version":"3198ee18f814cdf787321b4a32a26ddbf37acc52"} +{"name":"github-commit-match","key":"softprops/action-gh-release:v2.0.4","version":"9d7c94cfd0a1f3ed45544c887983e9fa900f0564"} +{"name":"registry-digest-arg","key":"docker.io/library/alpine:3.18.4","version":"sha256:eece025e432126ce23f223450a0326fbebde39cdf496a85d8c016293fc851978"} +{"name":"registry-digest-arg","key":"docker.io/library/alpine:3.18.5","version":"sha256:34871e7290500828b39e22294660bee86d966bc0017544e848dd9a255cdf59e0"} +{"name":"registry-digest-arg","key":"docker.io/library/alpine:3.19.0","version":"sha256:51b67269f354137895d43f3b3d810bfacd3945438e94dc5ac55fdac340352f48"} +{"name":"registry-digest-arg","key":"docker.io/library/alpine:3.19.1","version":"sha256:c5b1261d6d3e43071626931fc004f70149baeba2c8ec672bd4f27761f8e1ad6b"} +{"name":"registry-digest-arg","key":"docker.io/library/golang:1.21.4-alpine","version":"sha256:70afe55365a265f0762257550bc38440e0d6d6b97020d3f8c85328f00200dd8e"} +{"name":"registry-digest-arg","key":"docker.io/library/golang:1.21.5-alpine","version":"sha256:4db4aac30880b978cae5445dd4a706215249ad4f43d28bd7cdf7906e9be8dd6b"} +{"name":"registry-digest-arg","key":"docker.io/library/golang:1.21.6-alpine","version":"sha256:a6a7f1fcf12f5efa9e04b1e75020931a616cd707f14f62ab5262bfbe109aa84a"} +{"name":"registry-digest-arg","key":"docker.io/library/golang:1.22.0-alpine","version":"sha256:8e96e6cff6a388c2f70f5f662b64120941fcd7d4b89d62fec87520323a316bd9"} +{"name":"registry-digest-arg-match","key":"docker.io/library/alpine:3.19.1","version":"sha256:c5b1261d6d3e43071626931fc004f70149baeba2c8ec672bd4f27761f8e1ad6b"} +{"name":"registry-digest-arg-match","key":"docker.io/library/golang:1.22.0-alpine","version":"sha256:8e96e6cff6a388c2f70f5f662b64120941fcd7d4b89d62fec87520323a316bd9"} +{"name":"registry-digest-arg-match","key":"docker.io/library/golang:1.22.1-alpine","version":"sha256:0466223b8544fb7d4ff04748acc4d75a608234bf4e79563bff208d2060c0dd79"} +{"name":"registry-digest-match","key":"anchore/syft:v0.100.0","version":"sha256:df7b07bfadff45e0135d74f22478f47b16ac6aff4e8dbd93133fcae3bbbb790d"} +{"name":"registry-digest-match","key":"anchore/syft:v0.101.1","version":"sha256:ea3df0f0d7b85b352911f2b4a06fd08e2f19cdc793812c8766f7813b6a1e98cb"} +{"name":"registry-digest-match","key":"anchore/syft:v0.102.0","version":"sha256:fbbdb5f60a9db9400d49801bf70b19c29ac054b370dbccf538399918bbdf38a7"} +{"name":"registry-digest-match","key":"anchore/syft:v0.103.1","version":"sha256:96c56b2554079d90e5fc8999278638ecca6454713fceebd26321d0698975b243"} +{"name":"registry-digest-match","key":"anchore/syft:v0.104.0","version":"sha256:aeb052d18121587de9138ca715fc3207ef86358a56bd49ebd2d3ba27169c09d1"} +{"name":"registry-digest-match","key":"anchore/syft:v0.105.0","version":"sha256:e3dbedff17aaec7d06c6509fa42c6454ee2ed346299606fce6589096dc9efd70"} 
+{"name":"registry-digest-match","key":"anchore/syft:v0.97.1","version":"sha256:abc8d4310c54b56dd1e789d5f60b8ebc43f472652b34971d4b0d0dbed7f4ebda"} +{"name":"registry-digest-match","key":"anchore/syft:v0.98.0","version":"sha256:b353bf516310fcbc86676bb20849929298034e80f15873e63da18acdf7080b4e"} +{"name":"registry-digest-match","key":"anchore/syft:v0.99.0","version":"sha256:07d598b6a95280ed6ecc128685192173a00f370b5326cf50c62500d559075e1d"} +{"name":"registry-digest-match","key":"anchore/syft:v1.0.1","version":"sha256:d49defada853900861d55491ba549ab334148d51b11f23942abecb39ea83d4db"} +{"name":"registry-golang-latest","key":"golang-latest","version":"1.22"} +{"name":"registry-golang-matrix","key":"golang-matrix","version":"[\"1.20\", \"1.21\", \"1.22\"]"} +{"name":"registry-golang-oldest","key":"golang-oldest","version":"1.20"} +{"name":"registry-tag-arg-semver","key":"anchore/syft","version":"v1.0.1"} +{"name":"registry-tag-arg-semver","key":"davidanson/markdownlint-cli2","version":"v0.12.1"} +{"name":"registry-tag-arg-semver","key":"docker.io/library/alpine","version":"3.19.1"} +{"name":"registry-tag-arg-semver","key":"docker.io/library/golang","version":"1.22.1"} +{"name":"registry-tag-arg-semver","key":"docker.io/library/registry","version":"2.8.3"} +{"name":"registry-tag-arg-semver","key":"ghcr.io/igorshubovych/markdownlint-cli","version":"v0.35.0"} +{"name":"registry-tag-arg-semver","key":"ghcr.io/project-zot/zot-linux-amd64","version":"v2.0.2"} +{"name":"registry-tag-arg-semver-major","key":"docker.io/library/alpine","version":"3"} +{"name":"registry-tag-match-semver","key":"anchore/syft","version":"v1.0.1"} diff --git a/vendor/github.com/regclient/regclient/.version-bump.yml b/vendor/github.com/regclient/regclient/.version-bump.yml index c68dac167..c01c85609 100644 --- a/vendor/github.com/regclient/regclient/.version-bump.yml +++ b/vendor/github.com/regclient/regclient/.version-bump.yml @@ -1,46 +1,73 @@ files: "build/Dockerfile*": scans: - - docker-arg-alpine + - docker-arg-alpine-tag + - docker-arg-alpine-digest - docker-arg-go-tag - - docker-arg-go + - docker-arg-go-digest - git-commit-ecr - - git-commit-gcr + - git-tag-gcr - git-commit-lunajson - git-commit-semver + "build/oci-image.sh": + scans: + - shell-alpine-tag + - shell-alpine-tag-comment + - shell-alpine-digest ".github/workflows/*.yml": scans: - gha-golang-matrix - gha-golang-release - gha-uses-vx - gha-uses-semver + - gha-uses-commit - gha-syft-version - gha-cosign-version + - gha-alpine-tag + - gha-alpine-tag-comment + - gha-alpine-digest "Makefile": scans: + - makefile-gomajor + - makefile-go-vulncheck + - makefile-markdown-lint + - makefile-gosec + - makefile-osv-scanner - makefile-staticcheck - makefile-syft-version - makefile-syft-version2 - makefile-syft-digest + - makefile-ci-distribution + - makefile-ci-zot + "go.mod": + scans: + - go-mod-golang-release scans: - docker-arg-alpine: + docker-arg-alpine-tag: + type: "regexp" + source: "registry-tag-arg-semver" + args: + regexp: '^ARG ALPINE_VER=(?P\d+\.\d+\.\d+)@(?Psha256:[0-9a-f]+)\s*$' + repo: "docker.io/library/alpine" + docker-arg-alpine-digest: type: "regexp" - source: "registry-digest-arg" + source: "registry-digest-arg-match" args: - regexp: '^ARG ALPINE_VER=(?P[a-z0-9\-\.]+)@(?Psha256:[0-9a-f]+)\s*$' - image: "alpine" + regexp: '^ARG ALPINE_VER=(?P\d+\.\d+\.\d+)@(?Psha256:[0-9a-f]+)\s*$' + image: "docker.io/library/alpine" docker-arg-go-tag: type: "regexp" - source: "registry-golang-latest" + source: "registry-tag-arg-semver" args: regexp: '^ARG 
GO_VER=(?P[a-z0-9\-\.]+)-alpine@(?Psha256:[0-9a-f]+)\s*$' - docker-arg-go: + repo: "docker.io/library/golang" + docker-arg-go-digest: type: "regexp" - source: "registry-digest-arg" + source: "registry-digest-arg-match" args: regexp: '^ARG GO_VER=(?P[a-z0-9\-\.]+)@(?Psha256:[0-9a-f]+)\s*$' - image: "golang" + image: "docker.io/library/golang" git-commit-ecr: type: "regexp" source: "git-commit" @@ -48,12 +75,12 @@ scans: regexp: '^ARG ECR_HELPER_VER=(?P[0-9a-f]+)\s*$' repo: "https://github.com/awslabs/amazon-ecr-credential-helper.git" ref: main - git-commit-gcr: + git-tag-gcr: type: "regexp" - source: "git-commit" + source: "git-tag-semver" args: - regexp: '^ARG GCR_HELPER_VER=(?P[0-9a-f]+)\s*$' - repo: "https://github.com/GoogleCloudPlatform/docker-credential-gcr.git" + regexp: '^ARG GCR_HELPER_VER=(?P[^\s]+)\s*$' + repo: "github.com/GoogleCloudPlatform/docker-credential-gcr" ref: master git-commit-lunajson: type: "regexp" @@ -73,12 +100,17 @@ scans: type: "regexp" source: "gha-uses-vx" args: - regexp: '^\s+-?\s+uses: (?P[^@]+)@(?Pv\d+)\s*$' + regexp: '^\s+-?\s+uses: (?P[^@/]+/[^@/]+)[^@]*@(?P[0-9a-f]+)\s+#\s+(?Pv\d+)\s*$' gha-uses-semver: type: "regexp" source: "gha-uses-semver" args: - regexp: '^\s+-?\s+uses: (?P[^@/]+/[^@/]+)[^@]*@(?Pv\d+\.\d+\.\d+)\s*$' + regexp: '^\s+-?\s+uses: (?P[^@/]+/[^@/]+)[^@]*@(?P[0-9a-f]+)\s+#\s+(?Pv\d+\.\d+\.\d+)\s*$' + gha-uses-commit: + type: "regexp" + source: "github-commit-match" + args: + regexp: '^\s+-?\s+uses: (?P[^@/]+/[^@/]+)[^@]*@(?P[0-9a-f]+)\s+#\s+(?P[\w\d\.]+)\s*$' gha-golang-matrix: type: "regexp" source: "registry-golang-matrix" @@ -101,6 +133,71 @@ scans: args: regexp: '^\s*cosign-release: "(?Pv[0-9\.]+)"\s*$' repo: "github.com/sigstore/cosign" + gha-alpine-tag: + type: "regexp" + source: "registry-tag-arg-semver-major" + args: + regexp: '^\s*ALPINE_NAME: "alpine:(?P\d+)"\s*$' + repo: "docker.io/library/alpine" + gha-alpine-tag-comment: + type: "regexp" + source: "registry-tag-arg-semver" + args: + regexp: '^\s*ALPINE_DIGEST: "(?Psha256:[0-9a-f]+)"\s*#\s*(?P\d+\.\d+\.\d+)\s*$' + repo: "docker.io/library/alpine" + gha-alpine-digest: + type: "regexp" + source: "registry-digest-arg-match" + args: + regexp: '^\s*ALPINE_DIGEST: "(?Psha256:[0-9a-f]+)"\s*#\s*(?P\d+\.\d+\.\d+)\s*$' + image: "docker.io/library/alpine" + go-mod-golang-release: + type: "regexp" + source: "registry-golang-oldest" + args: + regexp: '^go (?P[0-9\.]+)\s*$' + makefile-ci-distribution: + type: "regexp" + source: "registry-tag-arg-semver" + args: + regexp: '^CI_DISTRIBUTION_VER\?=(?Pv?[0-9\.]+)\s*$' + repo: "docker.io/library/registry" + makefile-ci-zot: + type: "regexp" + source: "registry-tag-arg-semver" + args: + regexp: '^CI_ZOT_VER\?=(?Pv?[0-9\.]+)\s*$' + repo: "ghcr.io/project-zot/zot-linux-amd64" + makefile-gomajor: + type: "regexp" + source: "git-tag-semver" + args: + regexp: '^GOMAJOR_VER\?=(?Pv[0-9\.]+)\s*$' + repo: "github.com/icholy/gomajor" + makefile-go-vulncheck: + type: "regexp" + source: "git-tag-semver" + args: + regexp: '^GO_VULNCHECK_VER\?=(?Pv[0-9\.]+)\s*$' + repo: "go.googlesource.com/vuln" + makefile-gosec: + type: "regexp" + source: "git-tag-semver" + args: + regexp: '^GOSEC_VER\?=(?Pv[0-9\.]+)\s*$' + repo: "github.com/securego/gosec" + makefile-markdown-lint: + type: "regexp" + source: "registry-tag-arg-semver" + args: + regexp: '^MARKDOWN_LINT_VER\?=(?Pv[0-9\.]+)\s*$' + repo: "davidanson/markdownlint-cli2" + makefile-osv-scanner: + type: "regexp" + source: "git-tag-semver" + args: + regexp: '^OSV_SCANNER_VER\?=(?Pv[0-9\.]+)\s*$' + repo: 
"github.com/google/osv-scanner" makefile-staticcheck: type: "regexp" source: "git-tag-semver" @@ -123,6 +220,24 @@ scans: source: "registry-digest-match" args: regexp: '^SYFT_CONTAINER\?=(?P[^:]*):(?Pv[0-9\.]+)@(?Psha256:[0-9a-f]+)\s*$' + shell-alpine-tag: + type: "regexp" + source: "registry-tag-arg-semver-major" + args: + regexp: '^\s*ALPINE_NAME="alpine:(?P\d+)"\s*$' + repo: "docker.io/library/alpine" + shell-alpine-tag-comment: + type: "regexp" + source: "registry-tag-arg-semver" + args: + regexp: '^\s*ALPINE_DIGEST="(?Psha256:[0-9a-f]+)"\s*#\s*(?P\d+\.\d+\.\d+)\s*$' + repo: "docker.io/library/alpine" + shell-alpine-digest: + type: "regexp" + source: "registry-digest-arg-match" + args: + regexp: '^\s*ALPINE_DIGEST="(?Psha256:[0-9a-f]+)"\s*#\s*(?P\d+\.\d+\.\d+)\s*$' + image: "docker.io/library/alpine" sources: registry-tag-arg-semver: @@ -132,9 +247,20 @@ sources: type: "tag" repo: "{{ .ScanArgs.repo }}" filter: - expr: '^v[0-9]+\.[0-9]+\.[0-9]+$' + expr: '^v?[0-9]+\.[0-9]+\.[0-9]+$' sort: method: "semver" + registry-tag-arg-semver-major: + type: "registry" + key: "{{ .ScanArgs.repo }}" + args: + type: "tag" + repo: "{{ .ScanArgs.repo }}" + filter: + expr: '^v?[0-9]+\.[0-9]+\.[0-9]+$' + sort: + method: "semver" + template: '{{ index ( split .Version "." ) 0 }}' registry-tag-match-semver: type: "registry" key: "{{ .ScanMatch.Repo }}" @@ -142,10 +268,10 @@ sources: type: "tag" repo: "{{ .ScanMatch.Repo }}" filter: - expr: '^v[0-9]+\.[0-9]+\.[0-9]+$' + expr: '^v?[0-9]+\.[0-9]+\.[0-9]+$' sort: method: "semver" - registry-digest-arg: + registry-digest-arg-match: type: "registry" key: "{{ .ScanArgs.image }}:{{.ScanMatch.Tag}}" args: @@ -165,6 +291,17 @@ sources: expr: '^\d+\.\d+$' sort: method: "semver" + registry-golang-oldest: + type: "registry" + key: "golang-oldest" + args: + repo: "golang" + type: "tag" + filter: + expr: '^\d+\.\d+$' + sort: + method: "semver" + template: '{{ index .VerMap ( index .VerList 2 ) }}' registry-golang-matrix: type: "registry" key: "golang-matrix" @@ -214,3 +351,12 @@ sources: expr: '^v[0-9]+\.[0-9]+\.[0-9]+$' sort: method: "semver" + github-commit-match: + type: "git" + key: "{{ .ScanMatch.Repo }}:{{ .ScanMatch.Ref }}" + args: + type: "commit" + url: "https://github.com/{{ .ScanMatch.Repo }}.git" + ref: "{{ .ScanMatch.Ref }}" + filter: + expr: "^{{ .ScanMatch.Ref }}$" diff --git a/vendor/github.com/regclient/regclient/CODE_OF_CONDUCT.md b/vendor/github.com/regclient/regclient/CODE_OF_CONDUCT.md index 92ba58ac0..5498720f1 100644 --- a/vendor/github.com/regclient/regclient/CODE_OF_CONDUCT.md +++ b/vendor/github.com/regclient/regclient/CODE_OF_CONDUCT.md @@ -61,8 +61,8 @@ representative at an online or offline event. Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at -git@bmitch.net, or via DM on twitter (@sudo_bmitch) or slack (I'm found on the -Docker, CNCF, and OCI slacks). + or slack (I'm found on the CNCF, Docker, OCI, and OpenSSF +slacks). All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the diff --git a/vendor/github.com/regclient/regclient/CONTRIBUTING.md b/vendor/github.com/regclient/regclient/CONTRIBUTING.md index 178450297..d1d677538 100644 --- a/vendor/github.com/regclient/regclient/CONTRIBUTING.md +++ b/vendor/github.com/regclient/regclient/CONTRIBUTING.md @@ -2,16 +2,29 @@ ## Reporting security issues -Please send security issues to git@bmitch.net. 
+Please see [SECURITY.md](security.md) for the process to report security issues.
 
 ## Reporting other issues
 
 Please search for similar issues and if none are seen, report an issue at
 [github.com/regclient/regclient/issues](https://github.com/regclient/regclient/issues)
 
+## Code style
+
+This project attempts to follow these principles:
+
+- Code is canonical Go, following styles and patterns commonly used by the Go community.
+- Dependencies outside of the Go standard library should be minimized.
+- Dependencies should be pinned to a specific digest and tracked by Go or version-check.
+- Unit tests are strongly encouraged with a focus on test coverage of the successful path and common errors.
+- Linters and other style formatting tools are used, please run `make all` before committing any changes.
+
 ## Pull requests
 
-PR's are welcome.
-For anything beyond a minor fix, opening an issue is suggested to discuss possible solutions.
+PRs are welcome following the below guides:
+
+- For anything beyond a minor fix, opening an issue is suggested to discuss possible solutions.
+- Changes should be rebased on the main branch.
+- Changes should be squashed to a single commit per logical change.
 
 All changes must be signed (`git commit -s`) to indicate you agree to the
 [Developer Certificate of Origin](https://developercertificate.org/):
diff --git a/vendor/github.com/regclient/regclient/Makefile b/vendor/github.com/regclient/regclient/Makefile
index 6761a9d72..58e517a49 100644
--- a/vendor/github.com/regclient/regclient/Makefile
+++ b/vendor/github.com/regclient/regclient/Makefile
@@ -9,12 +9,20 @@ VCS_REF?=$(shell git rev-list -1 HEAD)
 ifneq ($(shell git status --porcelain 2>/dev/null),)
   VCS_REF := $(VCS_REF)-dirty
 endif
-VCS_DATE?=$(shell date -d "@$(shell git log -1 --format=%at)" +%Y-%m-%dT%H:%M:%SZ --utc)
+VCS_VERSION?=$(shell vcs_describe="$$(git describe --all)"; \
+	vcs_version="(devel)"; \
+	if [ "$${vcs_describe}" != "$${vcs_describe#tags/}" ]; then \
+		vcs_version="$${vcs_describe#tags/}"; \
+	elif [ "$${vcs_describe}" != "$${vcs_describe#heads/}" ]; then \
+		vcs_version="$${vcs_describe#heads/}"; \
+		if [ "main" = "$${vcs_version}" ]; then vcs_version=edge; fi; \
+	fi; \
+	echo "$${vcs_version}" | sed -r 's#/+#-#g')
 VCS_TAG?=$(shell git describe --tags --abbrev=0 2>/dev/null || true)
 LD_FLAGS?=-s -w -extldflags -static -buildid= -X \"github.com/regclient/regclient/internal/version.vcsTag=$(VCS_TAG)\"
 GO_BUILD_FLAGS?=-trimpath -ldflags "$(LD_FLAGS)" -tags nolegacy
 DOCKERFILE_EXT?=$(shell if docker build --help 2>/dev/null | grep -q -- '--progress'; then echo ".buildkit"; fi)
-DOCKER_ARGS?=--build-arg "VCS_REF=$(VCS_REF)"
+DOCKER_ARGS?=--build-arg "VCS_REF=$(VCS_REF)" --build-arg "VCS_VERSION=$(VCS_VERSION)"
 GOPATH?=$(shell go env GOPATH)
 PWD:=$(shell pwd)
 VER_BUMP?=$(shell command -v version-bump 2>/dev/null)
@@ -25,75 +33,143 @@ ifeq "$(strip $(VER_BUMP))" ''
 		-u "$(shell id -u):$(shell id -g)" \
 		$(VER_BUMP_CONTAINER)
 endif
+MARKDOWN_LINT_VER?=v0.12.1
+GOMAJOR_VER?=v0.10.1
+GOSEC_VER?=v2.19.0
+GO_VULNCHECK_VER?=v1.0.4
+OSV_SCANNER_VER?=v1.7.1
 SYFT?=$(shell command -v syft 2>/dev/null)
 SYFT_CMD_VER:=$(shell [ -x "$(SYFT)" ] && echo "v$$($(SYFT) version | awk '/^Version: / {print $$2}')" || echo "0")
-SYFT_VERSION?=v0.77.0
-SYFT_CONTAINER?=anchore/syft:v0.77.0@sha256:0135d844712731b86fab20ea584e594ad25b9f5a7fd262b5feb490e865edef89
+SYFT_VERSION?=v1.0.1
+SYFT_CONTAINER?=anchore/syft:v1.0.1@sha256:d49defada853900861d55491ba549ab334148d51b11f23942abecb39ea83d4db
 ifneq "$(SYFT_CMD_VER)"
"$(SYFT_VERSION)" SYFT=docker run --rm \ -v "$(shell pwd)/:$(shell pwd)/" -w "$(shell pwd)" \ -u "$(shell id -u):$(shell id -g)" \ $(SYFT_CONTAINER) endif -STATICCHECK_VER?=v0.4.3 - -.PHONY: all fmt vet test lint lint-go lint-md vendor binaries docker artifacts artifact-pre plugin-user plugin-host .FORCE +STATICCHECK_VER?=v0.4.7 +CI_DISTRIBUTION_VER?=2.8.3 +CI_ZOT_VER?=v2.0.2 +.PHONY: .FORCE .FORCE: -all: fmt vet test lint binaries ## Full build of Go binaries (including fmt, vet, test, and lint) +.PHONY: all +all: fmt goimports vet test lint binaries ## Full build of Go binaries (including fmt, vet, test, and lint) +.PHONY: fmt fmt: ## go fmt go fmt ./... +goimports: $(GOPATH)/bin/goimports + $(GOPATH)/bin/goimports -w -format-only -local github.com/regclient . + +.PHONY: vet vet: ## go vet go vet ./... +.PHONY: test test: ## go test - go test -cover ./... + go test -cover -race ./... -lint: lint-go lint-md ## Run all linting +.PHONY: lint +lint: lint-go lint-goimports lint-md lint-gosec ## Run all linting +.PHONY: lint-go lint-go: $(GOPATH)/bin/staticcheck .FORCE ## Run linting for Go $(GOPATH)/bin/staticcheck -checks all ./... +lint-goimports: $(GOPATH)/bin/goimports + @if [ -n "$$($(GOPATH)/bin/goimports -l -format-only -local github.com/regclient .)" ]; then \ + echo $(GOPATH)/bin/goimports -d -format-only -local github.com/regclient .; \ + $(GOPATH)/bin/goimports -d -format-only -local github.com/regclient .; \ + exit 1; \ + fi + +# excluding types/platform pending resultion to https://github.com/securego/gosec/issues/1116 +.PHONY: lint-gosec +lint-gosec: $(GOPATH)/bin/gosec .FORCE ## Run gosec + $(GOPATH)/bin/gosec -terse -exclude-dir types/platform ./... + +.PHONY: lint-md lint-md: .FORCE ## Run linting for markdown - docker run --rm -v "$(PWD):/workdir:ro" ghcr.io/igorshubovych/markdownlint-cli:latest \ - --ignore vendor . + docker run --rm -v "$(PWD):/workdir:ro" davidanson/markdownlint-cli2:$(MARKDOWN_LINT_VER) \ + "**/*.md" "#vendor" + +.PHONY: vulnerability-scan +vulnerability-scan: osv-scanner vulncheck-go ## Run all vulnerability scanners + +.PHONY: osv-scanner +osv-scanner: $(GOPATH)/bin/osv-scanner .FORCE ## Run OSV Scanner + $(GOPATH)/bin/osv-scanner scan -r --experimental-licenses="Apache-2.0,BSD-3-Clause,MIT,CC-BY-SA-4.0,UNKNOWN" . +.PHONY: vulncheck-go +vulncheck-go: $(GOPATH)/bin/govulncheck .FORCE ## Run govulncheck + $(GOPATH)/bin/govulncheck ./... + +.PHONY: vendor vendor: ## Vendor Go modules go mod vendor -binaries: vendor $(BINARIES) ## Build Go binaries +.PHONY: binaries +binaries: $(BINARIES) ## Build Go binaries bin/%: .FORCE CGO_ENABLED=0 go build ${GO_BUILD_FLAGS} -o bin/$* ./cmd/$* +.PHONY: docker docker: $(IMAGES) ## Build Docker images docker-%: .FORCE docker build -t regclient/$* -f build/Dockerfile.$*$(DOCKERFILE_EXT) $(DOCKER_ARGS) . docker build -t regclient/$*:alpine -f build/Dockerfile.$*$(DOCKERFILE_EXT) --target release-alpine $(DOCKER_ARGS) . 
+.PHONY: oci-image oci-image: $(addprefix oci-image-,$(COMMANDS)) ## Build reproducible images to an OCI Layout oci-image-%: bin/regctl .FORCE - build/oci-image.sh -r scratch -i "$*" -p "$(TEST_PLATFORMS)" - build/oci-image.sh -r alpine -i "$*" -p "$(TEST_PLATFORMS)" -b "alpine:3" + PATH="$(PWD)/bin:$(PATH)" build/oci-image.sh -r scratch -i "$*" -p "$(TEST_PLATFORMS)" + PATH="$(PWD)/bin:$(PATH)" build/oci-image.sh -r alpine -i "$*" -p "$(TEST_PLATFORMS)" -b "alpine:3" +.PHONY: test-docker test-docker: $(addprefix test-docker-,$(COMMANDS)) ## Build multi-platform docker images (but do not tag) test-docker-%: docker buildx build --platform="$(TEST_PLATFORMS)" -f build/Dockerfile.$*.buildkit . docker buildx build --platform="$(TEST_PLATFORMS)" -f build/Dockerfile.$*.buildkit --target release-alpine . +.PHONY: ci +ci: ci-distribution ci-zot ## Run CI tests against self hosted registries + +.PHONY: ci-distribution +ci-distribution: + docker run --rm -d -p 5000 \ + --label regclient-ci=true --name regclient-ci-distribution \ + -e "REGISTRY_STORAGE_DELETE_ENABLED=true" \ + docker.io/library/registry:$(CI_DISTRIBUTION_VER) + ./build/ci-test.sh -t localhost:$$(docker port regclient-ci-distribution 5000 | head -1 | cut -f2 -d:)/test-ci + docker stop regclient-ci-distribution + +.PHONY: ci-zot +ci-zot: + docker run --rm -d -p 5000 \ + --label regclient-ci=true --name regclient-ci-zot \ + -v "$$(pwd)/build/zot-config.json:/etc/zot/config.json:ro" \ + ghcr.io/project-zot/zot-linux-amd64:$(CI_ZOT_VER) + ./build/ci-test.sh -t localhost:$$(docker port regclient-ci-zot 5000 | head -1 | cut -f2 -d:)/test-ci + docker stop regclient-ci-zot + +.PHONY: artifacts artifacts: $(ARTIFACTS) ## Generate artifacts +.PHONY: artifact-pre artifact-pre: mkdir -p artifacts artifacts/%: artifact-pre .FORCE - @target="$*"; \ + @set -e; \ + target="$*"; \ command="$${target%%-*}"; \ platform_ext="$${target#*-}"; \ platform="$${platform_ext%.*}"; \ @@ -103,31 +179,81 @@ artifacts/%: artifact-pre .FORCE echo export GOARCH=$${GOARCH}; \ echo go build ${GO_BUILD_FLAGS} -o "$@" ./cmd/$${command}/; \ CGO_ENABLED=0 go build ${GO_BUILD_FLAGS} -o "$@" ./cmd/$${command}/; \ - $(SYFT) packages -q "file:$@" --name "$${command}" -o cyclonedx-json >"artifacts/$${command}-$${platform}.cyclonedx.json"; \ - $(SYFT) packages -q "file:$@" --name "$${command}" -o spdx-json >"artifacts/$${command}-$${platform}.spdx.json" + $(SYFT) scan -q "file:$@" --source-name "$${command}" -o cyclonedx-json >"artifacts/$${command}-$${platform}.cyclonedx.json"; \ + $(SYFT) scan -q "file:$@" --source-name "$${command}" -o spdx-json >"artifacts/$${command}-$${platform}.spdx.json" +.PHONY: plugin-user plugin-user: mkdir -p ${HOME}/.docker/cli-plugins/ cp docker-plugin/docker-regclient ${HOME}/.docker/cli-plugins/docker-regctl +.PHONY: plugin-host plugin-host: sudo cp docker-plugin/docker-regclient /usr/libexec/docker/cli-plugins/docker-regctl -util-golang-update: - go get -u +.PHONY: util-golang-major +util-golang-major: $(GOPATH)/bin/gomajor ## check for major dependency updates + $(GOPATH)/bin/gomajor list + +.PHONY: util-golang-update +util-golang-update: ## update go module versions + go get -u -t ./... go mod tidy - go mod vendor - -util-version-check: + [ ! 
-d vendor ] || go mod vendor + +.PHONY: util-release-preview +util-release-preview: $(GOPATH)/bin/gorelease ## preview changes for next release + git checkout main + ./.github/release.sh -d + gorelease + +.PHONY: util-release-run +util-release-run: ## generate a new release + git checkout main + ./.github/release.sh + +.PHONY: util-version-check +util-version-check: ## check all dependencies for updates $(VER_BUMP) check -util-version-update: +.PHONY: util-version-update +util-version-update: ## update versions on all dependencies $(VER_BUMP) update +$(GOPATH)/bin/gomajor: .FORCE + @[ -f "$(GOPATH)/bin/gomajor" ] \ + && [ "$$($(GOPATH)/bin/gomajor version | grep '^version' | cut -f 2 -d ' ')" = "$(GOMAJOR_VER)" ] \ + || go install github.com/icholy/gomajor@$(GOMAJOR_VER) + +$(GOPATH)/bin/goimports: .FORCE + @[ -f "$(GOPATH)/bin/goimports" ] \ + || go install golang.org/x/tools/cmd/goimports@latest + +$(GOPATH)/bin/gorelease: .FORCE + @[ -f "$(GOPATH)/bin/gorelease" ] \ + || go install golang.org/x/exp/cmd/gorelease@latest + +$(GOPATH)/bin/gosec: .FORCE + @[ -f $(GOPATH)/bin/gosec ] \ + && [ "$$($(GOPATH)/bin/gosec -version | grep '^Version' | cut -f 2 -d ' ')" = "$(GOSEC_VER)" ] \ + || go install -ldflags '-X main.Version=$(GOSEC_VER) -X main.GitTag=$(GOSEC_VER)' \ + github.com/securego/gosec/v2/cmd/gosec@$(GOSEC_VER) + $(GOPATH)/bin/staticcheck: .FORCE @[ -f $(GOPATH)/bin/staticcheck ] \ && [ "$$($(GOPATH)/bin/staticcheck -version | cut -f 3 -d ' ' | tr -d '()')" = "$(STATICCHECK_VER)" ] \ || go install "honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VER)" +$(GOPATH)/bin/govulncheck: .FORCE + @[ $$(go version -m $(GOPATH)/bin/govulncheck | \ + awk -F ' ' '{ if ($$1 == "mod" && $$2 == "golang.org/x/vuln") { printf "%s\n", $$3 } }') = "$(GO_VULNCHECK_VER)" ] \ + || CGO_ENABLED=0 go install "golang.org/x/vuln/cmd/govulncheck@$(GO_VULNCHECK_VER)" + +$(GOPATH)/bin/osv-scanner: .FORCE + @[ -f $(GOPATH)/bin/osv-scanner ] \ + && [ "$$(osv-scanner --version | awk -F ': ' '{ if ($$1 == "osv-scanner version") { printf "%s\n", $$2 } }')" = "$(OSV_SCANNER_VER)" ] \ + || CGO_ENABLED=0 go install "github.com/google/osv-scanner/cmd/osv-scanner@$(OSV_SCANNER_VER)" + +.PHONY: help help: # Display help @awk -F ':|##' '/^[^\t].+?:.*?##/ { printf "\033[36m%-30s\033[0m %s\n", $$1, $$NF }' $(MAKEFILE_LIST) diff --git a/vendor/github.com/regclient/regclient/README.md b/vendor/github.com/regclient/regclient/README.md index 72f02b01e..91d0cff91 100644 --- a/vendor/github.com/regclient/regclient/README.md +++ b/vendor/github.com/regclient/regclient/README.md @@ -1,9 +1,16 @@ # regclient -![GitHub Workflow Status](https://img.shields.io/github/workflow/status/regclient/regclient/Go?label=Go%20build) -![GitHub Workflow Status](https://img.shields.io/github/workflow/status/regclient/regclient/Docker?label=Docker%20build) -![GitHub](https://img.shields.io/github/license/regclient/regclient) +[![Go Workflow Status](https://img.shields.io/github/actions/workflow/status/regclient/regclient/go.yml?branch=main&label=Go%20build)](https://github.com/regclient/regclient/actions/workflows/go.yml) +[![Docker Workflow Status](https://img.shields.io/github/actions/workflow/status/regclient/regclient/docker.yml?branch=main&label=Docker%20build)](https://github.com/regclient/regclient/actions/workflows/docker.yml) +[![Dependency Workflow 
Status](https://img.shields.io/github/actions/workflow/status/regclient/regclient/version-check.yml?branch=main&label=Dependency%20check)](https://github.com/regclient/regclient/actions/workflows/version-check.yml)
+[![Vulnerability Workflow Status](https://img.shields.io/github/actions/workflow/status/regclient/regclient/vulnscans.yml?branch=main&label=Vulnerability%20check)](https://github.com/regclient/regclient/actions/workflows/vulnscans.yml)
+
 [![Go Reference](https://pkg.go.dev/badge/github.com/regclient/regclient.svg)](https://pkg.go.dev/github.com/regclient/regclient)
+![License](https://img.shields.io/github/license/regclient/regclient)
+[![Go Report Card](https://goreportcard.com/badge/github.com/regclient/regclient)](https://goreportcard.com/report/github.com/regclient/regclient)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/regclient/regclient/badge)](https://securityscorecards.dev/viewer/?uri=github.com/regclient/regclient)
+[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/8088/badge)](https://www.bestpractices.dev/projects/8088)
+[![GitHub Downloads](https://img.shields.io/github/downloads/regclient/regclient/total?label=GitHub%20downloads)](https://github.com/regclient/regclient/releases)
 
 Client interface for the registry API.
 This includes `regctl` for a command line interface to manage registries.
diff --git a/vendor/github.com/regclient/regclient/SECURITY.md b/vendor/github.com/regclient/regclient/SECURITY.md
index 68feee98e..50508cb45 100644
--- a/vendor/github.com/regclient/regclient/SECURITY.md
+++ b/vendor/github.com/regclient/regclient/SECURITY.md
@@ -1,3 +1,5 @@
 # Reporting security issues
 
-Please send security issues to git@bmitch.net.
+Please report security issues directly in GitHub at <https://github.com/regclient/regclient/security> or alternatively email <git@bmitch.net>.
+
+We will typically respond within 7 working days of your report. If the issue is confirmed as a vulnerability, we will open a Security Advisory and acknowledge your contributions as part of it. This project follows a 90 day disclosure timeline.
diff --git a/vendor/github.com/regclient/regclient/blob.go b/vendor/github.com/regclient/regclient/blob.go
index 7c184b406..c3cd7b698 100644
--- a/vendor/github.com/regclient/regclient/blob.go
+++ b/vendor/github.com/regclient/regclient/blob.go
@@ -3,22 +3,62 @@ package regclient
 import (
 	"bytes"
 	"context"
+	"errors"
+	"fmt"
 	"io"
+	"time"
 
+	"github.com/sirupsen/logrus"
+
+	"github.com/regclient/regclient/internal/throttle"
+	"github.com/regclient/regclient/scheme"
 	"github.com/regclient/regclient/types"
 	"github.com/regclient/regclient/types/blob"
+	"github.com/regclient/regclient/types/descriptor"
+	"github.com/regclient/regclient/types/errs"
 	"github.com/regclient/regclient/types/ref"
-	"github.com/sirupsen/logrus"
 )
 
-// BlobCopy copies a blob between two locations
-// If the blob already exists in the target, the copy is skipped
-// A server side cross repository blob mount is attempted
-func (rc *RegClient) BlobCopy(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d types.Descriptor) error {
+const blobCBFreq = time.Millisecond * 100
+
+type blobOpt struct {
+	callback func(kind types.CallbackKind, instance string, state types.CallbackState, cur, total int64)
+}
+
+// BlobOpts define options for the Blob* commands.
+type BlobOpts func(*blobOpt)
+
+// BlobWithCallback provides progress data to a callback function.
+func BlobWithCallback(callback func(kind types.CallbackKind, instance string, state types.CallbackState, cur, total int64)) BlobOpts { + return func(opts *blobOpt) { + opts.callback = callback + } +} + +// BlobCopy copies a blob between two locations. +// If the blob already exists in the target, the copy is skipped. +// A server side cross repository blob mount is attempted. +func (rc *RegClient) BlobCopy(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor, opts ...BlobOpts) error { + if !refSrc.IsSetRepo() { + return fmt.Errorf("refSrc is not set: %s%.0w", refSrc.CommonName(), errs.ErrInvalidReference) + } + if !refTgt.IsSetRepo() { + return fmt.Errorf("refTgt is not set: %s%.0w", refTgt.CommonName(), errs.ErrInvalidReference) + } + var opt blobOpt + for _, optFn := range opts { + optFn(&opt) + } tDesc := d tDesc.URLs = []string{} // ignore URLs when pushing to target + if opt.callback != nil { + opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackStarted, 0, d.Size) + } // for the same repository, there's nothing to copy if ref.EqualRepository(refSrc, refTgt) { + if opt.callback != nil { + opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackSkipped, 0, d.Size) + } rc.log.WithFields(logrus.Fields{ "src": refTgt.Reference, "tgt": refTgt.Reference, @@ -28,20 +68,50 @@ func (rc *RegClient) BlobCopy(ctx context.Context, refSrc ref.Ref, refTgt ref.Re } // check if layer already exists if _, err := rc.BlobHead(ctx, refTgt, tDesc); err == nil { + if opt.callback != nil { + opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackSkipped, 0, d.Size) + } rc.log.WithFields(logrus.Fields{ "tgt": refTgt.Reference, "digest": d, }).Debug("Blob copy skipped, already exists") return nil } + // acquire throttle for both src and tgt to avoid deadlocks + tList := []*throttle.Throttle{} + schemeSrcAPI, err := rc.schemeGet(refSrc.Scheme) + if err != nil { + return err + } + schemeTgtAPI, err := rc.schemeGet(refTgt.Scheme) + if err != nil { + return err + } + if tSrc, ok := schemeSrcAPI.(scheme.Throttler); ok { + tList = append(tList, tSrc.Throttle(refSrc, false)...) + } + if tTgt, ok := schemeTgtAPI.(scheme.Throttler); ok { + tList = append(tList, tTgt.Throttle(refTgt, true)...) 
+	}
+	if len(tList) > 0 {
+		ctx, err = throttle.AcquireMulti(ctx, tList)
+		if err != nil {
+			return err
+		}
+		defer throttle.ReleaseMulti(ctx, tList)
+	}
+
 	// try mounting blob from the source repo if the registry is the same
 	if ref.EqualRegistry(refSrc, refTgt) {
 		err := rc.BlobMount(ctx, refSrc, refTgt, d)
 		if err == nil {
+			if opt.callback != nil {
+				opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackSkipped, 0, d.Size)
+			}
 			rc.log.WithFields(logrus.Fields{
 				"src":    refTgt.Reference,
 				"tgt":    refTgt.Reference,
-				"digest": d,
+				"digest": d.Digest,
 			}).Debug("Blob copy performed server side with registry mount")
 			return nil
 		}
@@ -54,29 +124,61 @@ func (rc *RegClient) BlobCopy(ctx context.Context, refSrc ref.Ref, refTgt ref.Re
 	// fast options failed, download layer from source and push to target
 	blobIO, err := rc.BlobGet(ctx, refSrc, d)
 	if err != nil {
-		rc.log.WithFields(logrus.Fields{
-			"err":    err,
-			"src":    refSrc.Reference,
-			"digest": d,
-		}).Warn("Failed to retrieve blob")
+		if !errors.Is(err, context.Canceled) {
+			rc.log.WithFields(logrus.Fields{
+				"err":    err,
+				"src":    refSrc.Reference,
+				"digest": d.Digest,
+			}).Warn("Failed to retrieve blob")
+		}
 		return err
 	}
+	if opt.callback != nil {
+		opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackStarted, 0, d.Size)
+		ticker := time.NewTicker(blobCBFreq)
+		done := make(chan bool)
+		defer func() {
+			close(done)
+			ticker.Stop()
+			if ctx.Err() == nil {
+				opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackFinished, d.Size, d.Size)
+			}
+		}()
+		go func() {
+			for {
+				select {
+				case <-done:
+					return
+				case <-ticker.C:
+					offset, err := blobIO.Seek(0, io.SeekCurrent)
+					if err == nil && offset > 0 {
+						opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackActive, offset, d.Size)
+					}
+				}
+			}
+		}()
+	}
 	defer blobIO.Close()
 	if _, err := rc.BlobPut(ctx, refTgt, blobIO.GetDescriptor(), blobIO); err != nil {
-		rc.log.WithFields(logrus.Fields{
-			"err": err,
-			"src": refSrc.Reference,
-			"tgt": refTgt.Reference,
-		}).Warn("Failed to push blob")
+		if !errors.Is(err, context.Canceled) {
+			rc.log.WithFields(logrus.Fields{
+				"err": err,
+				"src": refSrc.Reference,
+				"tgt": refTgt.Reference,
+			}).Warn("Failed to push blob")
+		}
 		return err
 	}
 	return nil
 }
 
-// BlobDelete removes a blob from the registry
-// This method should only be used to repair a damaged registry
-// Typically a server side garbage collection should be used to purge unused blobs
-func (rc *RegClient) BlobDelete(ctx context.Context, r ref.Ref, d types.Descriptor) error {
+// BlobDelete removes a blob from the registry.
+// This method should only be used to repair a damaged registry.
+// Typically a server side garbage collection should be used to purge unused blobs.
+func (rc *RegClient) BlobDelete(ctx context.Context, r ref.Ref, d descriptor.Descriptor) error {
+	if !r.IsSetRepo() {
+		return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference)
+	}
 	schemeAPI, err := rc.schemeGet(r.Scheme)
 	if err != nil {
 		return err
@@ -84,12 +186,16 @@ func (rc *RegClient) BlobDelete(ctx context.Context, r ref.Ref, d types.Descript
 	return schemeAPI.BlobDelete(ctx, r, d)
 }
 
-// BlobGet retrieves a blob, returning a reader
-func (rc *RegClient) BlobGet(ctx context.Context, r ref.Ref, d types.Descriptor) (blob.Reader, error) {
+// BlobGet retrieves a blob, returning a reader.
+// This reader must be closed to free up resources that limit concurrent pulls.
+func (rc *RegClient) BlobGet(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { data, err := d.GetData() if err == nil { return blob.NewReader(blob.WithDesc(d), blob.WithRef(r), blob.WithReader(bytes.NewReader(data))), nil } + if !r.IsSetRepo() { + return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } schemeAPI, err := rc.schemeGet(r.Scheme) if err != nil { return nil, err @@ -97,17 +203,23 @@ func (rc *RegClient) BlobGet(ctx context.Context, r ref.Ref, d types.Descriptor) return schemeAPI.BlobGet(ctx, r, d) } -// BlobGetOCIConfig retrieves an OCI config from a blob, automatically extracting the JSON -func (rc *RegClient) BlobGetOCIConfig(ctx context.Context, ref ref.Ref, d types.Descriptor) (blob.OCIConfig, error) { - b, err := rc.BlobGet(ctx, ref, d) +// BlobGetOCIConfig retrieves an OCI config from a blob, automatically extracting the JSON. +func (rc *RegClient) BlobGetOCIConfig(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.OCIConfig, error) { + if !r.IsSetRepo() { + return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + b, err := rc.BlobGet(ctx, r, d) if err != nil { return nil, err } return b.ToOCIConfig() } -// BlobHead is used to verify if a blob exists and is accessible -func (rc *RegClient) BlobHead(ctx context.Context, r ref.Ref, d types.Descriptor) (blob.Reader, error) { +// BlobHead is used to verify if a blob exists and is accessible. +func (rc *RegClient) BlobHead(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { + if !r.IsSetRepo() { + return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } schemeAPI, err := rc.schemeGet(r.Scheme) if err != nil { return nil, err @@ -115,8 +227,14 @@ func (rc *RegClient) BlobHead(ctx context.Context, r ref.Ref, d types.Descriptor return schemeAPI.BlobHead(ctx, r, d) } -// BlobMount attempts to perform a server side copy/mount of the blob between repositories -func (rc *RegClient) BlobMount(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d types.Descriptor) error { +// BlobMount attempts to perform a server side copy/mount of the blob between repositories. +func (rc *RegClient) BlobMount(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor) error { + if !refSrc.IsSetRepo() { + return fmt.Errorf("ref is not set: %s%.0w", refSrc.CommonName(), errs.ErrInvalidReference) + } + if !refTgt.IsSetRepo() { + return fmt.Errorf("ref is not set: %s%.0w", refTgt.CommonName(), errs.ErrInvalidReference) + } schemeAPI, err := rc.schemeGet(refSrc.Scheme) if err != nil { return err @@ -125,13 +243,19 @@ func (rc *RegClient) BlobMount(ctx context.Context, refSrc ref.Ref, refTgt ref.R } // BlobPut uploads a blob to a repository. +// Descriptor is optional, leave size and digest to zero value if unknown. +// Reader must also be an [io.Seeker] to support chunked upload fallback. +// // This will attempt an anonymous blob mount first which some registries may support. // It will then try doing a full put of the blob without chunking (most widely supported). // If the full put fails, it will fall back to a chunked upload (useful for flaky networks). 
-func (rc *RegClient) BlobPut(ctx context.Context, ref ref.Ref, d types.Descriptor, rdr io.Reader) (types.Descriptor, error) { - schemeAPI, err := rc.schemeGet(ref.Scheme) +func (rc *RegClient) BlobPut(ctx context.Context, r ref.Ref, d descriptor.Descriptor, rdr io.Reader) (descriptor.Descriptor, error) { + if !r.IsSetRepo() { + return descriptor.Descriptor{}, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + schemeAPI, err := rc.schemeGet(r.Scheme) if err != nil { - return types.Descriptor{}, err + return descriptor.Descriptor{}, err } - return schemeAPI.BlobPut(ctx, ref, d, rdr) + return schemeAPI.BlobPut(ctx, r, d, rdr) } diff --git a/vendor/github.com/regclient/regclient/config/credhelper.go b/vendor/github.com/regclient/regclient/config/credhelper.go index 7e0097734..b926f9934 100644 --- a/vendor/github.com/regclient/regclient/config/credhelper.go +++ b/vendor/github.com/regclient/regclient/config/credhelper.go @@ -10,7 +10,7 @@ import ( "strings" ) -// cred helper wraps a command +// credHelper wraps a command that manages user credentials. type credHelper struct { prog string env map[string]string @@ -21,6 +21,7 @@ func newCredHelper(prog string, env map[string]string) *credHelper { } func (ch *credHelper) run(arg string, input io.Reader) ([]byte, error) { + //#nosec G204 only untrusted arg is a hostname which the executed command should not trust cmd := exec.Command(ch.prog, arg) cmd.Env = os.Environ() if ch.env != nil { @@ -39,6 +40,7 @@ type credStore struct { Secret string `json:"Secret"` } +// get requests a credential from the helper for a given host. func (ch *credHelper) get(host *Host) error { hostname := host.Hostname if host.CredHost != "" { @@ -52,7 +54,7 @@ func (ch *credHelper) get(host *Host) error { outB, err := ch.run("get", hostIn) if err != nil { outS := strings.TrimSpace(string(outB)) - return fmt.Errorf("error getting credentials, output: %s, error: %v", outS, err) + return fmt.Errorf("error getting credentials, output: %s, error: %w", outS, err) } err = json.NewDecoder(bytes.NewReader(outB)).Decode(&credOut) if err != nil { @@ -70,12 +72,13 @@ func (ch *credHelper) get(host *Host) error { return nil } +// list returns a list of hosts supported by the credential helper. func (ch *credHelper) list() ([]Host, error) { credList := map[string]string{} outB, err := ch.run("list", bytes.NewReader([]byte{})) if err != nil { outS := strings.TrimSpace(string(outB)) - return nil, fmt.Errorf("error getting credential list, output: %s, error: %v", outS, err) + return nil, fmt.Errorf("error getting credential list, output: %s, error: %w", outS, err) } err = json.NewDecoder(bytes.NewReader(outB)).Decode(&credList) if err != nil { @@ -91,4 +94,4 @@ func (ch *credHelper) list() ([]Host, error) { return hostList, nil } -// store method not implemented +// TODO: store method not implemented diff --git a/vendor/github.com/regclient/regclient/config/docker.go b/vendor/github.com/regclient/regclient/config/docker.go index dc3935494..8a5d02a5d 100644 --- a/vendor/github.com/regclient/regclient/config/docker.go +++ b/vendor/github.com/regclient/regclient/config/docker.go @@ -13,14 +13,16 @@ import ( ) const ( - dockerEnv = "DOCKER_CONFIG" - dockerDir = ".docker" - dockerConfFile = "config.json" + // dockerEnv is the environment variable used to look for Docker's config.json. + dockerEnv = "DOCKER_CONFIG" + // dockerDir is the directory name for Docker's config (inside the users home directory). 
+ dockerDir = ".docker" + // dockerConfFile is the name of Docker's config file. + dockerConfFile = "config.json" + // dockerHelperPre is the prefix of docker credential helpers. dockerHelperPre = "docker-credential-" ) -// methods to parse user's docker config.json file - // dockerConfig is used to parse the ~/.docker/config.json type dockerConfig struct { AuthConfigs map[string]dockerAuthConfig `json:"auths"` @@ -56,12 +58,13 @@ type dockerAuthConfig struct { RegistryToken string `json:"registrytoken,omitempty"` } +// DockerLoad returns a slice of hosts from the users docker config. func DockerLoad() ([]Host, error) { cf := conffile.New(conffile.WithDirName(dockerDir, dockerConfFile), conffile.WithEnvDir(dockerEnv, dockerConfFile)) return dockerParse(cf) } -// parse from io.Reader to []Host +// dockerParse parses a docker config into a slice of Hosts. func dockerParse(cf *conffile.File) ([]Host, error) { rdr, err := cf.Open() if err != nil && errors.Is(err, fs.ErrNotExist) { @@ -102,6 +105,7 @@ func dockerParse(cf *conffile.File) ([]Host, error) { return hosts, nil } +// dockerAuthToHost parses an auth entry from a docker config into a Host. func dockerAuthToHost(name string, conf dockerConfig, auth dockerAuthConfig) (Host, error) { helper := "" if conf.CredentialHelpers != nil && conf.CredentialHelpers[name] != "" { @@ -127,6 +131,7 @@ func dockerAuthToHost(name string, conf dockerConfig, auth dockerAuthConfig) (Ho return *h, nil } +// decodeAuth extracts a base64 encoded user:pass into the username and password. func decodeAuth(authStr string) (string, string, error) { if authStr == "" { return "", "", nil diff --git a/vendor/github.com/regclient/regclient/config/host.go b/vendor/github.com/regclient/regclient/config/host.go index 3e2b4c42f..5ed3eecb4 100644 --- a/vendor/github.com/regclient/regclient/config/host.go +++ b/vendor/github.com/regclient/regclient/config/host.go @@ -1,4 +1,4 @@ -// Package config is used for all regclient configuration settings +// Package config is used for all regclient configuration settings. package config import ( @@ -6,43 +6,53 @@ import ( "fmt" "io" "strings" + "sync" "time" - "github.com/regclient/regclient/internal/timejson" "github.com/sirupsen/logrus" + + "github.com/regclient/regclient/internal/throttle" + "github.com/regclient/regclient/internal/timejson" ) -// TLSConf specifies whether TLS is enabled for a host +// TLSConf specifies whether TLS is enabled and verified for a host. type TLSConf int const ( - // TLSUndefined indicates TLS is not passed, defaults to Enabled + // TLSUndefined indicates TLS is not passed, defaults to Enabled. TLSUndefined TLSConf = iota - // TLSEnabled uses TLS (https) for the connection + // TLSEnabled uses TLS (https) for the connection. TLSEnabled - // TLSInsecure uses TLS but does not verify CA + // TLSInsecure uses TLS but does not verify CA. TLSInsecure - // TLSDisabled does not use TLS (http) + // TLSDisabled does not use TLS (http). TLSDisabled ) const ( - // DockerRegistry is the name resolved in docker images on Hub + // DockerRegistry is the name resolved in docker images on Hub. DockerRegistry = "docker.io" - // DockerRegistryAuth is the name provided in docker's config for Hub + // DockerRegistryAuth is the name provided in docker's config for Hub. DockerRegistryAuth = "https://index.docker.io/v1/" - // DockerRegistryDNS is the host to connect to for Hub + // DockerRegistryDNS is the host to connect to for Hub. 
DockerRegistryDNS = "registry-1.docker.io" - // tokenUser is the username returned by credential helpers that indicates the password is an identity token + // defaultExpire is the default time to expire a credential and force re-authentication. + defaultExpire = time.Hour * 1 + // defaultCredHelperRetry is the time to refresh a credential from a failed credential helper command. + defaultCredHelperRetry = time.Second * 5 + // defaultConcurrent is the default number of concurrent registry connections. + defaultConcurrent = 3 + // defaultReqPerSec is the default maximum frequency to send requests to a registry. + defaultReqPerSec = 10 + // tokenUser is the username returned by credential helpers that indicates the password is an identity token. tokenUser = "" ) var ( - defaultExpire = time.Hour * 1 - defaultCredHelperRetry = time.Second * 5 + mu = sync.Mutex{} ) -// MarshalJSON converts to a json string using MarshalText +// MarshalJSON converts TLSConf to a json string using MarshalText. func (t TLSConf) MarshalJSON() ([]byte, error) { s, err := t.MarshalText() if err != nil { @@ -51,7 +61,7 @@ func (t TLSConf) MarshalJSON() ([]byte, error) { return json.Marshal(string(s)) } -// MarshalText converts TLSConf to a string +// MarshalText converts TLSConf to a string. func (t TLSConf) MarshalText() ([]byte, error) { var s string switch t { @@ -67,7 +77,7 @@ func (t TLSConf) MarshalText() ([]byte, error) { return []byte(s), nil } -// UnmarshalJSON converts TLSConf from a json string +// UnmarshalJSON converts TLSConf from a json string. func (t *TLSConf) UnmarshalJSON(b []byte) error { var s string if err := json.Unmarshal(b, &s); err != nil { @@ -76,7 +86,7 @@ func (t *TLSConf) UnmarshalJSON(b []byte) error { return t.UnmarshalText([]byte(s)) } -// UnmarshalText converts TLSConf from a string +// UnmarshalText converts TLSConf from a string. func (t *TLSConf) UnmarshalText(b []byte) error { switch strings.ToLower(string(b)) { default: @@ -93,49 +103,52 @@ func (t *TLSConf) UnmarshalText(b []byte) error { return nil } -// Host struct contains host specific settings +// Host defines settings for connecting to a registry. type Host struct { - Name string `json:"-" yaml:"registry,omitempty"` // name of the host, read from yaml, not written in json - Scheme string `json:"scheme,omitempty" yaml:"scheme"` // TODO: deprecate, delete - TLS TLSConf `json:"tls,omitempty" yaml:"tls"` // enabled, disabled, insecure - RegCert string `json:"regcert,omitempty" yaml:"regcert"` // public pem cert of registry - ClientCert string `json:"clientcert,omitempty" yaml:"clientcert"` // public pem cert for client (mTLS) - ClientKey string `json:"clientkey,omitempty" yaml:"clientkey"` // private pem cert for client (mTLS) - DNS []string `json:"dns,omitempty" yaml:"dns"` // TODO: remove slice, single string, or remove entirely? 
- Hostname string `json:"hostname,omitempty" yaml:"hostname"` // replaces DNS array with single string - User string `json:"user,omitempty" yaml:"user"` // username, not used with credHelper - Pass string `json:"pass,omitempty" yaml:"pass"` // password, not used with credHelper - Token string `json:"token,omitempty" yaml:"token"` // token, experimental for specific APIs - CredHelper string `json:"credHelper,omitempty" yaml:"credHelper"` // credential helper command for requesting logins - CredExpire timejson.Duration `json:"credExpire,omitempty" yaml:"credExpire"` // time until credential expires - CredHost string `json:"credHost" yaml:"credHost"` // used when a helper hostname doesn't match Hostname - credRefresh time.Time `json:"-" yaml:"-"` // internal use, when to refresh credentials - PathPrefix string `json:"pathPrefix,omitempty" yaml:"pathPrefix"` // used for mirrors defined within a repository namespace - Mirrors []string `json:"mirrors,omitempty" yaml:"mirrors"` // list of other Host Names to use as mirrors - Priority uint `json:"priority,omitempty" yaml:"priority"` // priority when sorting mirrors, higher priority attempted first - RepoAuth bool `json:"repoAuth,omitempty" yaml:"repoAuth"` // tracks a separate auth per repo - API string `json:"api,omitempty" yaml:"api"` // experimental: registry API to use - APIOpts map[string]string `json:"apiOpts,omitempty" yaml:"apiOpts"` // options for APIs - BlobChunk int64 `json:"blobChunk,omitempty" yaml:"blobChunk"` // size of each blob chunk - BlobMax int64 `json:"blobMax,omitempty" yaml:"blobMax"` // threshold to switch to chunked upload, -1 to disable, 0 for regclient.blobMaxPut - ReqPerSec float64 `json:"reqPerSec,omitempty" yaml:"reqPerSec"` // requests per second - ReqConcurrent int64 `json:"reqConcurrent,omitempty" yaml:"reqConcurrent"` // concurrent requests + Name string `json:"-" yaml:"registry,omitempty"` // Name of the registry (required) (yaml configs pass this as a field, json provides this from the object key) + TLS TLSConf `json:"tls,omitempty" yaml:"tls"` // TLS setting: enabled (default), disabled, insecure + RegCert string `json:"regcert,omitempty" yaml:"regcert"` // public pem cert of registry + ClientCert string `json:"clientCert,omitempty" yaml:"clientCert"` // public pem cert for client (mTLS) + ClientKey string `json:"clientKey,omitempty" yaml:"clientKey"` // private pem cert for client (mTLS) + Hostname string `json:"hostname,omitempty" yaml:"hostname"` // hostname of registry, default is the registry name + User string `json:"user,omitempty" yaml:"user"` // username, not used with credHelper + Pass string `json:"pass,omitempty" yaml:"pass"` // password, not used with credHelper + Token string `json:"token,omitempty" yaml:"token"` // token, experimental for specific APIs + CredHelper string `json:"credHelper,omitempty" yaml:"credHelper"` // credential helper command for requesting logins + CredExpire timejson.Duration `json:"credExpire,omitempty" yaml:"credExpire"` // time until credential expires + CredHost string `json:"credHost" yaml:"credHost"` // used when a helper hostname doesn't match Hostname + PathPrefix string `json:"pathPrefix,omitempty" yaml:"pathPrefix"` // used for mirrors defined within a repository namespace + Mirrors []string `json:"mirrors,omitempty" yaml:"mirrors"` // list of other Host Names to use as mirrors + Priority uint `json:"priority,omitempty" yaml:"priority"` // priority when sorting mirrors, higher priority attempted first + RepoAuth bool `json:"repoAuth,omitempty" yaml:"repoAuth"` // 
tracks a separate auth per repo + API string `json:"api,omitempty" yaml:"api"` // experimental: registry API to use + APIOpts map[string]string `json:"apiOpts,omitempty" yaml:"apiOpts"` // options for APIs + BlobChunk int64 `json:"blobChunk,omitempty" yaml:"blobChunk"` // size of each blob chunk + BlobMax int64 `json:"blobMax,omitempty" yaml:"blobMax"` // threshold to switch to chunked upload, -1 to disable, 0 for regclient.blobMaxPut + ReqPerSec float64 `json:"reqPerSec,omitempty" yaml:"reqPerSec"` // requests per second, default is defaultReqPerSec(10) + ReqConcurrent int64 `json:"reqConcurrent,omitempty" yaml:"reqConcurrent"` // concurrent requests, default is defaultConcurrent(3) + Scheme string `json:"scheme,omitempty" yaml:"scheme"` // Deprecated: use TLS instead + credRefresh time.Time `json:"-" yaml:"-"` // internal use, when to refresh credentials + throttle *throttle.Throttle `json:"-" yaml:"-"` // internal use, limit for concurrent requests } +// Cred defines a user credential for accessing a registry. type Cred struct { User, Password, Token string } -// HostNew creates a default Host entry +// HostNew creates a default Host entry. func HostNew() *Host { h := Host{ - TLS: TLSEnabled, - APIOpts: map[string]string{}, + TLS: TLSEnabled, + APIOpts: map[string]string{}, + ReqConcurrent: int64(defaultConcurrent), + ReqPerSec: float64(defaultReqPerSec), } return &h } -// HostNewName creates a default Host with a hostname +// HostNewName creates a default Host with a hostname. func HostNewName(name string) *Host { h := HostNew() origName := name @@ -168,6 +181,7 @@ func HostNewName(name string) *Host { return h } +// GetCred returns the credential, fetching from a credential helper if needed. func (host *Host) GetCred() Cred { // refresh from credHelper if needed if host.CredHelper != "" && (host.credRefresh.IsZero() || time.Now().After(host.credRefresh)) { @@ -193,7 +207,7 @@ func (host *Host) refreshHelper() { } } -// Merge adds fields from a new config host entry +// Merge adds fields from a new config host entry. 
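An illustrative sketch of how the new defaults surface through the public config API (not part of this change; the registry name is hypothetical). HostNewName now seeds ReqConcurrent and ReqPerSec from defaultConcurrent and defaultReqPerSec, and ReqConcurrent must be settled before the first Throttle call builds the limiter, which is why Merge (below) refuses later changes to it:

    package main

    import (
        "fmt"

        "github.com/regclient/regclient/config"
    )

    func main() {
        h := config.HostNewName("registry.example.com") // hypothetical registry
        fmt.Println(h.ReqConcurrent, h.ReqPerSec)       // 3 10, from the new defaults
        // adjust limits before the throttle is created; once Throttle() has run,
        // Merge rejects changes to ReqConcurrent
        h.ReqConcurrent = 5
        h.ReqPerSec = 20
        _ = h.Throttle() // lazily builds the shared concurrency limiter
    }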
func (host *Host) Merge(newHost Host, log *logrus.Logger) error { name := newHost.Name if name == "" { @@ -442,6 +456,14 @@ func (host *Host) Merge(newHost Host, log *logrus.Logger) error { if newHost.ReqConcurrent > 0 { if host.ReqConcurrent != 0 && host.ReqConcurrent != newHost.ReqConcurrent { + if host.throttle != nil { + log.WithFields(logrus.Fields{ + "orig": host.ReqConcurrent, + "new": newHost.ReqConcurrent, + "host": name, + }).Warn("Unable to change ReqConcurrent after throttle is created") + return fmt.Errorf("unable to change ReqConcurrent after throttle is created") + } log.WithFields(logrus.Fields{ "orig": host.ReqConcurrent, "new": newHost.ReqConcurrent, @@ -454,6 +476,18 @@ func (host *Host) Merge(newHost Host, log *logrus.Logger) error { return nil } +func (host *Host) Throttle() *throttle.Throttle { + if host.ReqConcurrent <= 0 { + return nil + } + mu.Lock() + defer mu.Unlock() + if host.throttle == nil { + host.throttle = throttle.New(int(host.ReqConcurrent)) + } + return host.throttle +} + func copyMapString(src map[string]string) map[string]string { copy := map[string]string{} for k, v := range src { diff --git a/vendor/github.com/regclient/regclient/image.go b/vendor/github.com/regclient/regclient/image.go index 960ad2074..574892471 100644 --- a/vendor/github.com/regclient/regclient/image.go +++ b/vendor/github.com/regclient/regclient/image.go @@ -2,13 +2,16 @@ package regclient import ( "archive/tar" + "compress/gzip" "context" "encoding/json" "errors" "fmt" "io" + "net/url" "path/filepath" "strings" + "sync" "time" // crypto libraries included for go-digest @@ -16,16 +19,20 @@ import ( _ "crypto/sha512" digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "github.com/regclient/regclient/pkg/archive" "github.com/regclient/regclient/scheme" "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/descriptor" "github.com/regclient/regclient/types/docker/schema2" + "github.com/regclient/regclient/types/errs" "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/mediatype" v1 "github.com/regclient/regclient/types/oci/v1" "github.com/regclient/regclient/types/platform" "github.com/regclient/regclient/types/ref" "github.com/regclient/regclient/types/warning" - "github.com/sirupsen/logrus" ) const ( @@ -42,15 +49,17 @@ type dockerTarManifest struct { Config string RepoTags []string Layers []string - Parent digest.Digest `json:",omitempty"` - LayerSources map[digest.Digest]types.Descriptor `json:",omitempty"` + Parent digest.Digest `json:",omitempty"` + LayerSources map[digest.Digest]descriptor.Descriptor `json:",omitempty"` } type tarFileHandler func(header *tar.Header, trd *tarReadData) error type tarReadData struct { tr *tar.Reader + name string handleAdded bool handlers map[string]tarFileHandler + links map[string][]string processed map[string]bool finish []func() error // data processed from various handlers @@ -71,73 +80,113 @@ type tarWriteData struct { } type imageOpt struct { + callback func(kind types.CallbackKind, instance string, state types.CallbackState, cur, total int64) checkBaseDigest string checkBaseRef string checkSkipConfig bool child bool + exportCompress bool exportRef ref.Ref + fastCheck bool forceRecursive bool + importName string includeExternal bool digestTags bool platform string platforms []string referrerConfs []scheme.ReferrerConfig tagList []string + mu sync.Mutex + seen map[string]*imageSeen + finalFn []func(context.Context) error +} + +type imageSeen struct { + 
done chan struct{} + err error } -// ImageOpts define options for the Image* commands +// ImageOpts define options for the Image* commands. type ImageOpts func(*imageOpt) -// ImageWithCheckBaseDigest provides a base digest to compare +// ImageWithCallback provides progress data to a callback function. +func ImageWithCallback(callback func(kind types.CallbackKind, instance string, state types.CallbackState, cur, total int64)) ImageOpts { + return func(opts *imageOpt) { + opts.callback = callback + } +} + +// ImageWithCheckBaseDigest provides a base digest to compare in ImageCheckBase. func ImageWithCheckBaseDigest(d string) ImageOpts { return func(opts *imageOpt) { opts.checkBaseDigest = d } } -// ImageWithCheckBaseRef provides a base reference to compare +// ImageWithCheckBaseRef provides a base reference to compare in ImageCheckBase. func ImageWithCheckBaseRef(r string) ImageOpts { return func(opts *imageOpt) { opts.checkBaseRef = r } } -// ImageWithCheckSkipConfig skips the configuration check +// ImageWithCheckSkipConfig skips the configuration check in ImageCheckBase. func ImageWithCheckSkipConfig() ImageOpts { return func(opts *imageOpt) { opts.checkSkipConfig = true } } -// ImageWithChild attempts to copy every manifest and blob even if parent manifests already exist. +// ImageWithChild attempts to copy every manifest and blob even if parent manifests already exist in ImageCopy. func ImageWithChild() ImageOpts { return func(opts *imageOpt) { opts.child = true } } -// ImageWithExportRef overrides the image name embedded in the export file +// ImageWithExportCompress adds gzip compression to tar export output in ImageExport. +func ImageWithExportCompress() ImageOpts { + return func(opts *imageOpt) { + opts.exportCompress = true + } +} + +// ImageWithExportRef overrides the image name embedded in the export file in ImageExport. func ImageWithExportRef(r ref.Ref) ImageOpts { return func(opts *imageOpt) { opts.exportRef = r } } -// ImageWithForceRecursive attempts to copy every manifest and blob even if parent manifests already exist. +// ImageWithFastCheck skips check for referrers when manifest has already been copied in ImageCopy. +func ImageWithFastCheck() ImageOpts { + return func(opts *imageOpt) { + opts.fastCheck = true + } +} + +// ImageWithForceRecursive attempts to copy every manifest and blob even if parent manifests already exist in ImageCopy. func ImageWithForceRecursive() ImageOpts { return func(opts *imageOpt) { opts.forceRecursive = true } } -// ImageWithIncludeExternal attempts to copy every manifest and blob even if parent manifests already exist. +// ImageWithImportName selects the name of the image to import when multiple images are included in ImageImport. +func ImageWithImportName(name string) ImageOpts { + return func(opts *imageOpt) { + opts.importName = name + } +} + +// ImageWithIncludeExternal attempts to copy every manifest and blob even if parent manifests already exist in ImageCopy. func ImageWithIncludeExternal() ImageOpts { return func(opts *imageOpt) { opts.includeExternal = true } } -// ImageWithDigestTags looks for "sha-.*" tags in the repo to copy with any manifest. +// ImageWithDigestTags looks for "sha-.*" tags in the repo to copy with any manifest in ImageCopy. // These are used by some artifact systems like sigstore/cosign. func ImageWithDigestTags() ImageOpts { return func(opts *imageOpt) { @@ -145,15 +194,14 @@ func ImageWithDigestTags() ImageOpts { } } -// ImageWithPlatform requests specific platforms from a manifest list. 
-// This is used by ImageCheckBase. +// ImageWithPlatform requests specific platforms from a manifest list in ImageCheckBase. func ImageWithPlatform(p string) ImageOpts { return func(opts *imageOpt) { opts.platform = p } } -// ImageWithPlatforms only copies specific platforms from a manifest list. +// ImageWithPlatforms only copies specific platforms from a manifest list in ImageCopy. // This will result in a failure on many registries that validate manifests. // Use the empty string to indicate images without a platform definition should be copied. func ImageWithPlatforms(p []string) ImageOpts { @@ -162,7 +210,7 @@ func ImageWithPlatforms(p []string) ImageOpts { } } -// ImageWithReferrers recursively includes images that refer to this. +// ImageWithReferrers recursively recursively includes referrer images in ImageCopy. func ImageWithReferrers(rOpts ...scheme.ReferrerOpts) ImageOpts { return func(opts *imageOpt) { if opts.referrerConfs == nil { @@ -177,7 +225,7 @@ func ImageWithReferrers(rOpts ...scheme.ReferrerOpts) ImageOpts { } // ImageCheckBase returns nil if the base image is unchanged. -// A base image mismatch returns an error that wraps types.ErrMismatch. +// A base image mismatch returns an error that wraps errs.ErrMismatch. func (rc *RegClient) ImageCheckBase(ctx context.Context, r ref.Ref, opts ...ImageOpts) error { var opt imageOpt for _, optFn := range opts { @@ -194,7 +242,7 @@ func (rc *RegClient) ImageCheckBase(ctx context.Context, r ref.Ref, opts ...Imag } ma, ok := m.(manifest.Annotator) if !ok { - return fmt.Errorf("image does not support annotations, base image must be provided%.0w", types.ErrMissingAnnotation) + return fmt.Errorf("image does not support annotations, base image must be provided%.0w", errs.ErrMissingAnnotation) } annot, err := ma.GetAnnotations() if err != nil { @@ -203,7 +251,7 @@ func (rc *RegClient) ImageCheckBase(ctx context.Context, r ref.Ref, opts ...Imag if baseName, ok := annot[types.AnnotationBaseImageName]; ok { opt.checkBaseRef = baseName } else { - return fmt.Errorf("image does not have a base annotation, base image must be provided%.0w", types.ErrMissingAnnotation) + return fmt.Errorf("image does not have a base annotation, base image must be provided%.0w", errs.ErrMissingAnnotation) } if baseDig, ok := annot[types.AnnotationBaseImageDigest]; ok { opt.checkBaseDigest = baseDig @@ -238,7 +286,7 @@ func (rc *RegClient) ImageCheckBase(ctx context.Context, r ref.Ref, opts ...Imag "expected": expectDig.String(), }).Debug("base image digest changed") return fmt.Errorf("base digest changed, %s, expected %s, received %s%.0w", - baseR.CommonName(), expectDig.String(), baseMH.GetDescriptor().Digest.String(), types.ErrMismatch) + baseR.CommonName(), expectDig.String(), baseMH.GetDescriptor().Digest.String(), errs.ErrMismatch) } } @@ -336,7 +384,7 @@ func (rc *RegClient) ImageCheckBase(ctx context.Context, r ref.Ref, opts ...Imag "digest": baseLayers[i].Digest.String(), }).Debug("image layer changed") return fmt.Errorf("base layer changed, %s[%d], expected %s, received %s%.0w", - baseR.CommonName(), i, layers[i].Digest.String(), baseLayers[i].Digest.String(), types.ErrMismatch) + baseR.CommonName(), i, layers[i].Digest.String(), baseLayers[i].Digest.String(), errs.ErrMismatch) } } @@ -378,7 +426,7 @@ func (rc *RegClient) ImageCheckBase(ctx context.Context, r ref.Ref, opts ...Imag "history": baseConfOCI.History[i], }).Debug("image history changed") return fmt.Errorf("base history changed, %s[%d], expected %v, received %v%.0w", - baseR.CommonName(), i, 
confOCI.History[i], baseConfOCI.History[i], types.ErrMismatch) + baseR.CommonName(), i, confOCI.History[i], baseConfOCI.History[i], errs.ErrMismatch) } } @@ -388,12 +436,16 @@ func (rc *RegClient) ImageCheckBase(ctx context.Context, r ref.Ref, opts ...Imag return nil } -// ImageCopy copies an image -// This will retag an image in the same repository, only pushing and pulling the top level manifest -// On the same registry, it will attempt to use cross-repository blob mounts to avoid pulling blobs -// Blobs are only pulled when they don't exist on the target and a blob mount fails +// ImageCopy copies an image. +// This will retag an image in the same repository, only pushing and pulling the top level manifest. +// On the same registry, it will attempt to use cross-repository blob mounts to avoid pulling blobs. +// Blobs are only pulled when they don't exist on the target and a blob mount fails. +// Referrers are optionally copied recursively. func (rc *RegClient) ImageCopy(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, opts ...ImageOpts) error { - var opt imageOpt + opt := imageOpt{ + seen: map[string]*imageSeen{}, + finalFn: []func(context.Context) error{}, + } for _, optFn := range opts { optFn(&opt) } @@ -401,196 +453,171 @@ func (rc *RegClient) ImageCopy(ctx context.Context, refSrc ref.Ref, refTgt ref.R if w := warning.FromContext(ctx); w == nil { ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) } - return rc.imageCopyOpt(ctx, refSrc, refTgt, types.Descriptor{}, opt.child, &opt) -} - -func (rc *RegClient) imageCopyOpt(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d types.Descriptor, child bool, opt *imageOpt) error { - mOpts := []ManifestOpts{} - if child { - mOpts = append(mOpts, WithManifestChild()) - } - // check if scheme/refTgt prefers parent manifests pushed first - // if so, this should automatically set forceRecursive - tgtSI, err := rc.schemeInfo(refTgt) + // block GC from running (in OCIDir) during the copy + schemeTgtAPI, err := rc.schemeGet(refTgt.Scheme) if err != nil { - return fmt.Errorf("failed looking up scheme for %s: %v", refTgt.CommonName(), err) - } - if tgtSI.ManifestPushFirst { - opt.forceRecursive = true + return err } - // check if source and destination already match - mdh, errD := rc.ManifestHead(ctx, refTgt, WithManifestRequireDigest()) - if opt.forceRecursive { - // copy forced, unable to run below skips - } else if errD == nil && refTgt.Digest != "" && digest.Digest(refTgt.Digest) == mdh.GetDescriptor().Digest { - rc.log.WithFields(logrus.Fields{ - "target": refTgt.Reference, - "digest": mdh.GetDescriptor().Digest.String(), - }).Info("Copy not needed, target already up to date") - return nil - } else if errD == nil && refTgt.Digest == "" { - msh, errS := rc.ManifestHead(ctx, refSrc, WithManifestRequireDigest()) - if errS == nil && msh.GetDescriptor().Digest == mdh.GetDescriptor().Digest { - rc.log.WithFields(logrus.Fields{ - "source": refSrc.Reference, - "target": refTgt.Reference, - "digest": mdh.GetDescriptor().Digest.String(), - }).Info("Copy not needed, target already up to date") - return nil - } + if tgtGCLocker, isGCLocker := schemeTgtAPI.(scheme.GCLocker); isGCLocker { + tgtGCLocker.GCLock(refTgt) + defer tgtGCLocker.GCUnlock(refTgt) } - - // get the manifest for the source - m, err := rc.ManifestGet(ctx, refSrc, WithManifestDesc(d)) + // run the copy of manifests and blobs recursively + err = rc.imageCopyOpt(ctx, refSrc, refTgt, descriptor.Descriptor{}, opt.child, []digest.Digest{}, &opt) if err != nil { - 
rc.log.WithFields(logrus.Fields{ - "ref": refSrc.Reference, - "err": err, - }).Warn("Failed to get source manifest") return err } - - if tgtSI.ManifestPushFirst { - // push manifest to target - err = rc.ManifestPut(ctx, refTgt, m, mOpts...) + // run any final functions, digest-tags and referrers that detected loops are retried here + for _, fn := range opt.finalFn { + err := fn(ctx) if err != nil { - rc.log.WithFields(logrus.Fields{ - "target": refTgt.Reference, - "err": err, - }).Warn("Failed to push manifest") return err } } + return nil +} - if !ref.EqualRepository(refSrc, refTgt) { - // copy components of the image if the repository is different - if mi, ok := m.(manifest.Indexer); ok { - // manifest lists need to recursively copy nested images by digest - pd, err := mi.GetManifestList() +// imageCopyOpt is a thread safe copy of a manifest and nested content. +func (rc *RegClient) imageCopyOpt(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor, child bool, parents []digest.Digest, opt *imageOpt) (err error) { + var mSrc, mTgt manifest.Manifest + var sDig digest.Digest + seenCB := func(error) {} + defer func() { + if seenCB != nil { + seenCB(err) + } + }() + // if digest is provided and we are already copying it, wait + if d.Digest != "" { + sDig = d.Digest + } else if refSrc.Digest != "" { + sDig = digest.Digest(refSrc.Digest) + } + if sDig != "" { + if seenCB, err = imageSeenOrWait(ctx, opt, refTgt.Tag, sDig, parents); seenCB == nil { + return err + } + } + // check target with head request + mTgt, err = rc.ManifestHead(ctx, refTgt, WithManifestRequireDigest()) + var urlError *url.Error + if err != nil && errors.As(err, &urlError) { + return fmt.Errorf("failed to access target registry: %w", err) + } + // for non-recursive copies, compare to source digest + if err == nil && (opt.fastCheck || (!opt.forceRecursive && opt.referrerConfs == nil && !opt.digestTags)) { + if sDig == "" { + mSrc, err = rc.ManifestHead(ctx, refSrc, WithManifestRequireDigest()) if err != nil { + return fmt.Errorf("copy failed, error getting source: %w", err) + } + sDig = mSrc.GetDescriptor().Digest + if seenCB, err = imageSeenOrWait(ctx, opt, refTgt.Tag, sDig, parents); seenCB == nil { return err } - for _, entry := range pd { - // skip copy of platforms not specifically included - if len(opt.platforms) > 0 { - match, err := imagePlatformInList(entry.Platform, opt.platforms) - if err != nil { - return err - } - if !match { - rc.log.WithFields(logrus.Fields{ - "platform": entry.Platform, - }).Debug("Platform excluded from copy") - continue - } + } + if sDig == mTgt.GetDescriptor().Digest { + if opt.callback != nil { + opt.callback(types.CallbackManifest, d.Digest.String(), types.CallbackSkipped, mTgt.GetDescriptor().Size, mTgt.GetDescriptor().Size) + } + return nil + } + } + // when copying/updating digest tags or referrers, only the source digest is needed for an image + if mTgt != nil && mSrc == nil && !opt.forceRecursive && sDig == "" { + mSrc, err = rc.ManifestHead(ctx, refSrc, WithManifestRequireDigest()) + if err != nil { + return fmt.Errorf("copy failed, error getting source: %w", err) + } + sDig = mSrc.GetDescriptor().Digest + if seenCB, err = imageSeenOrWait(ctx, opt, refTgt.Tag, sDig, parents); seenCB == nil { + return err + } + } + // get the source manifest when a copy is needed or recursion into the content is needed + if sDig == "" || mTgt == nil || sDig != mTgt.GetDescriptor().Digest || opt.forceRecursive || mTgt.IsList() { + mSrc, err = rc.ManifestGet(ctx, refSrc, 
WithManifestDesc(d)) + if err != nil { + return fmt.Errorf("copy failed, error getting source: %w", err) + } + if sDig == "" { + sDig = mSrc.GetDescriptor().Digest + if seenCB, err = imageSeenOrWait(ctx, opt, refTgt.Tag, sDig, parents); seenCB == nil { + return err + } + } + } + // setup vars for a copy + mOpts := []ManifestOpts{} + if child { + mOpts = append(mOpts, WithManifestChild()) + } + bOpt := []BlobOpts{} + if opt.callback != nil { + bOpt = append(bOpt, BlobWithCallback(opt.callback)) + } + waitCh := make(chan error) + waitCount := 0 + ctx, cancel := context.WithCancel(ctx) + defer cancel() + parentsNew := make([]digest.Digest, len(parents)+1) + copy(parentsNew, parents) + parentsNew[len(parentsNew)-1] = sDig + if opt.callback != nil { + opt.callback(types.CallbackManifest, d.Digest.String(), types.CallbackStarted, 0, d.Size) + } + // process entries in an index + if mSrcIndex, ok := mSrc.(manifest.Indexer); ok && mSrc.IsSet() && !ref.EqualRepository(refSrc, refTgt) { + // manifest lists need to recursively copy nested images by digest + dList, err := mSrcIndex.GetManifestList() + if err != nil { + return err + } + for _, dEntry := range dList { + // skip copy of platforms not specifically included + if len(opt.platforms) > 0 { + match, err := imagePlatformInList(dEntry.Platform, opt.platforms) + if err != nil { + return err + } + if !match { + rc.log.WithFields(logrus.Fields{ + "platform": dEntry.Platform, + }).Debug("Platform excluded from copy") + continue } + } + dEntry := dEntry + waitCount++ + go func() { + var err error rc.log.WithFields(logrus.Fields{ - "platform": entry.Platform, - "digest": entry.Digest.String(), + "platform": dEntry.Platform, + "digest": dEntry.Digest.String(), }).Debug("Copy platform") - entrySrc := refSrc - entryTgt := refTgt - entrySrc.Tag = "" - entryTgt.Tag = "" - entrySrc.Digest = entry.Digest.String() - entryTgt.Digest = entry.Digest.String() - switch entry.MediaType { - case types.MediaTypeDocker1Manifest, types.MediaTypeDocker1ManifestSigned, - types.MediaTypeDocker2Manifest, types.MediaTypeDocker2ManifestList, - types.MediaTypeOCI1Manifest, types.MediaTypeOCI1ManifestList: + entrySrc := refSrc.SetDigest(dEntry.Digest.String()) + entryTgt := refTgt.SetDigest(dEntry.Digest.String()) + switch dEntry.MediaType { + case mediatype.Docker1Manifest, mediatype.Docker1ManifestSigned, + mediatype.Docker2Manifest, mediatype.Docker2ManifestList, + mediatype.OCI1Manifest, mediatype.OCI1ManifestList: // known manifest media type - err = rc.imageCopyOpt(ctx, entrySrc, entryTgt, entry, true, opt) - case types.MediaTypeDocker2ImageConfig, types.MediaTypeOCI1ImageConfig, - types.MediaTypeDocker2LayerGzip, types.MediaTypeOCI1Layer, types.MediaTypeOCI1LayerGzip, - types.MediaTypeBuildkitCacheConfig: + err = rc.imageCopyOpt(ctx, entrySrc, entryTgt, dEntry, true, parentsNew, opt) + case mediatype.Docker2ImageConfig, mediatype.OCI1ImageConfig, + mediatype.Docker2LayerGzip, mediatype.OCI1Layer, mediatype.OCI1LayerGzip, + mediatype.BuildkitCacheConfig: // known blob media type - err = rc.BlobCopy(ctx, entrySrc, entryTgt, entry) + err = rc.imageCopyBlob(ctx, entrySrc, entryTgt, dEntry, opt, bOpt...) 
default: // unknown media type, first try an image copy - err = rc.imageCopyOpt(ctx, entrySrc, entryTgt, entry, true, opt) + err = rc.imageCopyOpt(ctx, entrySrc, entryTgt, dEntry, true, parentsNew, opt) if err != nil { // fall back to trying to copy a blob - err = rc.BlobCopy(ctx, entrySrc, entryTgt, entry) + err = rc.imageCopyBlob(ctx, entrySrc, entryTgt, dEntry, opt, bOpt...) } } - if err != nil { - return err - } - } - } - if mi, ok := m.(manifest.Imager); ok { - // copy components of an image - // transfer the config - cd, err := mi.GetConfig() - if err != nil { - // docker schema v1 does not have a config object, ignore if it's missing - if !errors.Is(err, types.ErrUnsupportedMediaType) { - rc.log.WithFields(logrus.Fields{ - "ref": refSrc.Reference, - "err": err, - }).Warn("Failed to get config digest from manifest") - return fmt.Errorf("failed to get config digest for %s: %w", refSrc.CommonName(), err) - } - } else { - rc.log.WithFields(logrus.Fields{ - "source": refSrc.Reference, - "target": refTgt.Reference, - "digest": cd.Digest.String(), - }).Info("Copy config") - if err := rc.BlobCopy(ctx, refSrc, refTgt, cd); err != nil { - rc.log.WithFields(logrus.Fields{ - "source": refSrc.Reference, - "target": refTgt.Reference, - "digest": cd.Digest.String(), - "err": err, - }).Warn("Failed to copy config") - return err - } - } - - // copy filesystem layers - l, err := mi.GetLayers() - if err != nil { - return err - } - for _, layerSrc := range l { - if len(layerSrc.URLs) > 0 && !opt.includeExternal { - // skip blobs where the URLs are defined, these aren't hosted and won't be pulled from the source - rc.log.WithFields(logrus.Fields{ - "source": refSrc.Reference, - "target": refTgt.Reference, - "layer": layerSrc.Digest.String(), - "external-urls": layerSrc.URLs, - }).Debug("Skipping external layer") - continue - } - rc.log.WithFields(logrus.Fields{ - "source": refSrc.Reference, - "target": refTgt.Reference, - "layer": layerSrc.Digest.String(), - }).Info("Copy layer") - if err := rc.BlobCopy(ctx, refSrc, refTgt, layerSrc); err != nil { - rc.log.WithFields(logrus.Fields{ - "source": refSrc.Reference, - "target": refTgt.Reference, - "layer": layerSrc.Digest.String(), - "err": err, - }).Warn("Failed to copy layer") - return err - } - } - } - } - - if !tgtSI.ManifestPushFirst { - // push manifest to target - err = rc.ManifestPut(ctx, refTgt, m, mOpts...) - if err != nil { - rc.log.WithFields(logrus.Fields{ - "target": refTgt.Reference, - "err": err, - }).Warn("Failed to push manifest") - return err + waitCh <- err + }() } } @@ -602,7 +629,7 @@ func (rc *RegClient) imageCopyOpt(ctx context.Context, refSrc ref.Ref, refTgt re return err } referrerTags = append(referrerTags, rl.Tags...) 
- descList := []types.Descriptor{} + descList := []descriptor.Descriptor{} if len(opt.referrerConfs) == 0 { descList = rl.Descriptors } else { @@ -612,29 +639,48 @@ func (rc *RegClient) imageCopyOpt(ctx context.Context, refSrc ref.Ref, refTgt re } } for _, rDesc := range descList { - referrerSrc := refSrc - referrerSrc.Tag = "" - referrerSrc.Digest = rDesc.Digest.String() - referrerTgt := refTgt - referrerTgt.Tag = "" - referrerTgt.Digest = rDesc.Digest.String() - err = rc.imageCopyOpt(ctx, referrerSrc, referrerTgt, rDesc, true, opt) - if err != nil { - rc.log.WithFields(logrus.Fields{ - "digest": rDesc.Digest.String(), - "src": referrerSrc.CommonName(), - "tgt": referrerTgt.CommonName(), - }).Warn("Failed to copy referrer") - return err + opt.mu.Lock() + seen := opt.seen[":"+rDesc.Digest.String()] + opt.mu.Unlock() + if seen != nil { + continue // skip referrers that have been seen } + referrerSrc := refSrc.SetDigest(rDesc.Digest.String()) + referrerTgt := refTgt.SetDigest(rDesc.Digest.String()) + rDesc := rDesc + waitCount++ + go func() { + err := rc.imageCopyOpt(ctx, referrerSrc, referrerTgt, rDesc, true, parentsNew, opt) + if errors.Is(err, errs.ErrLoopDetected) { + // if a loop is detected, push the referrers copy to the end + opt.mu.Lock() + opt.finalFn = append(opt.finalFn, func(ctx context.Context) error { + return rc.imageCopyOpt(ctx, referrerSrc, referrerTgt, rDesc, true, []digest.Digest{}, opt) + }) + opt.mu.Unlock() + waitCh <- nil + } else { + if err != nil && !errors.Is(err, context.Canceled) { + rc.log.WithFields(logrus.Fields{ + "digest": rDesc.Digest.String(), + "src": referrerSrc.CommonName(), + "tgt": referrerTgt.CommonName(), + }).Warn("Failed to copy referrer") + } + waitCh <- err + } + }() } } // lookup digest tags to include artifacts with image if opt.digestTags { - if len(opt.tagList) == 0 { + // load tag listing for digest tag copy + opt.mu.Lock() + if opt.tagList == nil { tl, err := rc.TagList(ctx, refSrc) if err != nil { + opt.mu.Unlock() rc.log.WithFields(logrus.Fields{ "source": refSrc.Reference, "err": err, @@ -643,58 +689,293 @@ func (rc *RegClient) imageCopyOpt(ctx context.Context, refSrc ref.Ref, refTgt re } tags, err := tl.GetTags() if err != nil { + opt.mu.Unlock() rc.log.WithFields(logrus.Fields{ "source": refSrc.Reference, "err": err, }).Warn("Failed to list tags for digest-tag copy") return err } + if tags == nil { + tags = []string{} + } opt.tagList = tags } - prefix := fmt.Sprintf("%s-%s", m.GetDescriptor().Digest.Algorithm(), m.GetDescriptor().Digest.Encoded()) + opt.mu.Unlock() + prefix := fmt.Sprintf("%s-%s", sDig.Algorithm(), sDig.Encoded()) for _, tag := range opt.tagList { if strings.HasPrefix(tag, prefix) { // skip referrers that were copied above + found := false for _, referrerTag := range referrerTags { if referrerTag == tag { - continue + found = true + break } } - refTagSrc := refSrc - refTagSrc.Tag = tag - refTagSrc.Digest = "" - refTagTgt := refTgt - refTagTgt.Tag = tag - refTagTgt.Digest = "" - err = rc.imageCopyOpt(ctx, refTagSrc, refTagTgt, types.Descriptor{}, false, opt) + if found { + continue + } + refTagSrc := refSrc.SetTag(tag) + refTagTgt := refTgt.SetTag(tag) + tag := tag + waitCount++ + go func() { + err := rc.imageCopyOpt(ctx, refTagSrc, refTagTgt, descriptor.Descriptor{}, false, parentsNew, opt) + if errors.Is(err, errs.ErrLoopDetected) { + // if a loop is detected, push the digest tag copy back to the end + opt.mu.Lock() + opt.finalFn = append(opt.finalFn, func(ctx context.Context) error { + return rc.imageCopyOpt(ctx, 
refTagSrc, refTagTgt, descriptor.Descriptor{}, false, []digest.Digest{}, opt) + }) + opt.mu.Unlock() + waitCh <- nil + } else { + if err != nil && !errors.Is(err, context.Canceled) { + rc.log.WithFields(logrus.Fields{ + "tag": tag, + "src": refTagSrc.CommonName(), + "tgt": refTagTgt.CommonName(), + }).Warn("Failed to copy digest-tag") + } + waitCh <- err + } + }() + } + } + } + + // check for any errors and abort early if found + err = nil + done := false + for !done && waitCount > 0 { + if err == nil { + select { + case err = <-waitCh: if err != nil { + cancel() + } + default: + done = true // happy path + } + } else { + if errors.Is(err, context.Canceled) { + // try to find a better error message than context canceled + err = <-waitCh + } else { + <-waitCh + } + } + if !done { + waitCount-- + } + } + if err != nil { + rc.log.WithFields(logrus.Fields{ + "err": err, + "sDig": sDig, + }).Debug("child manifest copy failed") + return err + } + + // If source is image, copy blobs + if mSrcImg, ok := mSrc.(manifest.Imager); ok && mSrc.IsSet() && !ref.EqualRepository(refSrc, refTgt) { + // copy the config + cd, err := mSrcImg.GetConfig() + if err != nil { + // docker schema v1 does not have a config object, ignore if it's missing + if !errors.Is(err, errs.ErrUnsupportedMediaType) { + rc.log.WithFields(logrus.Fields{ + "ref": refSrc.Reference, + "err": err, + }).Warn("Failed to get config digest from manifest") + return fmt.Errorf("failed to get config digest for %s: %w", refSrc.CommonName(), err) + } + } else { + waitCount++ + go func() { + rc.log.WithFields(logrus.Fields{ + "source": refSrc.Reference, + "target": refTgt.Reference, + "digest": cd.Digest.String(), + }).Info("Copy config") + err := rc.imageCopyBlob(ctx, refSrc, refTgt, cd, opt, bOpt...) + if err != nil && !errors.Is(err, context.Canceled) { rc.log.WithFields(logrus.Fields{ - "tag": tag, - "src": refTagSrc.CommonName(), - "tgt": refTagTgt.CommonName(), - }).Warn("Failed to copy digest-tag") - return err + "source": refSrc.Reference, + "target": refTgt.Reference, + "digest": cd.Digest.String(), + "err": err, + }).Warn("Failed to copy config") } + waitCh <- err + }() + } + + // copy filesystem layers + l, err := mSrcImg.GetLayers() + if err != nil { + return err + } + for _, layerSrc := range l { + if len(layerSrc.URLs) > 0 && !opt.includeExternal { + // skip blobs where the URLs are defined, these aren't hosted and won't be pulled from the source + rc.log.WithFields(logrus.Fields{ + "source": refSrc.Reference, + "target": refTgt.Reference, + "layer": layerSrc.Digest.String(), + "external-urls": layerSrc.URLs, + }).Debug("Skipping external layer") + continue } + waitCount++ + layerSrc := layerSrc + go func() { + rc.log.WithFields(logrus.Fields{ + "source": refSrc.Reference, + "target": refTgt.Reference, + "layer": layerSrc.Digest.String(), + }).Info("Copy layer") + err := rc.imageCopyBlob(ctx, refSrc, refTgt, layerSrc, opt, bOpt...) 
+ if err != nil && !errors.Is(err, context.Canceled) { + rc.log.WithFields(logrus.Fields{ + "source": refSrc.Reference, + "target": refTgt.Reference, + "layer": layerSrc.Digest.String(), + "err": err, + }).Warn("Failed to copy layer") + } + waitCh <- err + }() } } + // wait for background tasks to finish + err = nil + for waitCount > 0 { + if err == nil { + err = <-waitCh + if err != nil { + cancel() + } + } else { + if errors.Is(err, context.Canceled) { + // try to find a better error message than context canceled + err = <-waitCh + } else { + <-waitCh + } + } + waitCount-- + } + if err != nil { + return err + } + + // push manifest + if mTgt == nil || sDig != mTgt.GetDescriptor().Digest || opt.forceRecursive { + err = rc.ManifestPut(ctx, refTgt, mSrc, mOpts...) + if err != nil { + if !errors.Is(err, context.Canceled) { + rc.log.WithFields(logrus.Fields{ + "target": refTgt.Reference, + "err": err, + }).Warn("Failed to push manifest") + } + return err + } + if opt.callback != nil { + opt.callback(types.CallbackManifest, d.Digest.String(), types.CallbackFinished, d.Size, d.Size) + } + } else { + if opt.callback != nil { + opt.callback(types.CallbackManifest, d.Digest.String(), types.CallbackSkipped, d.Size, d.Size) + } + } + if seenCB != nil { + seenCB(nil) + seenCB = nil + } + return nil } +func (rc *RegClient) imageCopyBlob(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor, opt *imageOpt, bOpt ...BlobOpts) error { + seenCB, err := imageSeenOrWait(ctx, opt, "", d.Digest, []digest.Digest{}) + if seenCB == nil { + return err + } + err = rc.BlobCopy(ctx, refSrc, refTgt, d, bOpt...) + seenCB(err) + return err +} + +// imageSeenOrWait returns either a callback to report the error when the digest hasn't been seen before +// or it will wait for the previous copy to run and return the error from that copy +func imageSeenOrWait(ctx context.Context, opt *imageOpt, tag string, dig digest.Digest, parents []digest.Digest) (func(error), error) { + var seenNew *imageSeen + key := tag + ":" + dig.String() + opt.mu.Lock() + seen := opt.seen[key] + if seen == nil { + seenNew = &imageSeen{ + done: make(chan struct{}), + } + opt.seen[key] = seenNew + } + opt.mu.Unlock() + if seen != nil { + // quick check for the previous copy already done + select { + case <-seen.done: + return nil, seen.err + default: + } + // look for loops in parents + for _, p := range parents { + if key == tag+":"+p.String() { + return nil, errs.ErrLoopDetected + } + } + // wait for copy to finish or context to cancel + done := ctx.Done() + select { + case <-seen.done: + return nil, seen.err + case <-done: + return nil, ctx.Err() + } + } else { + return func(err error) { + seenNew.err = err + close(seenNew.done) + // on failures, delete the history to allow a retry + if err != nil { + opt.mu.Lock() + delete(opt.seen, key) + opt.mu.Unlock() + } + }, nil + } +} + // ImageExport exports an image to an output stream. // The format is compatible with "docker load" if a single image is selected and not a manifest list. // The ref must include a tag for exporting to docker (defaults to latest), and may also include a digest. -// The export is also formatted according to OCI layout which supports multi-platform images. -// +// The export is also formatted according to [OCI Layout] which supports multi-platform images. // A tar file will be sent to outStream. 
// // Resulting filesystem: -// oci-layout: created at top level, can be done at the start -// index.json: created at top level, single descriptor with org.opencontainers.image.ref.name annotation pointing to the tag -// manifest.json: created at top level, based on every layer added, only works for a single arch image -// blobs/$algo/$hash: each content addressable object (manifest, config, or layer), created recursively +// - oci-layout: created at top level, can be done at the start +// - index.json: created at top level, single descriptor with org.opencontainers.image.ref.name annotation pointing to the tag +// - manifest.json: created at top level, based on every layer added, only works for a single arch image +// - blobs/$algo/$hash: each content addressable object (manifest, config, or layer), created recursively +// +// [OCI Layout]: https://github.com/opencontainers/image-spec/blob/master/image-layout.md func (rc *RegClient) ImageExport(ctx context.Context, r ref.Ref, outStream io.Writer, opts ...ImageOpts) error { + if !r.IsSet() { + return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } var ociIndex v1.Index var opt imageOpt @@ -706,7 +987,13 @@ func (rc *RegClient) ImageExport(ctx context.Context, r ref.Ref, outStream io.Wr } // create tar writer object - tw := tar.NewWriter(outStream) + out := outStream + if opt.exportCompress { + gzOut := gzip.NewWriter(out) + defer gzOut.Close() + out = gzOut + } + tw := tar.NewWriter(out) defer tw.Close() twd := &tarWriteData{ tw: tw, @@ -742,7 +1029,7 @@ func (rc *RegClient) ImageExport(ctx context.Context, r ref.Ref, outStream io.Wr // generate/write an OCI index ociIndex.Versioned = v1.IndexSchemaVersion - ociIndex.Manifests = []types.Descriptor{mDesc} // initialize with the descriptor to the manifest list + ociIndex.Manifests = []descriptor.Descriptor{mDesc} // initialize with the descriptor to the manifest list err = twd.tarWriteFileJSON(ociIndexFilename, ociIndex) if err != nil { return err @@ -765,7 +1052,7 @@ func (rc *RegClient) ImageExport(ctx context.Context, r ref.Ref, outStream io.Wr RepoTags: []string{refTag.CommonName()}, Config: tarOCILayoutDescPath(conf), Layers: []string{}, - LayerSources: map[digest.Digest]types.Descriptor{}, + LayerSources: map[digest.Digest]descriptor.Descriptor{}, } dl, err := mi.GetLayers() if err != nil { @@ -793,23 +1080,23 @@ func (rc *RegClient) ImageExport(ctx context.Context, r ref.Ref, outStream io.Wr } // imageExportDescriptor pulls a manifest or blob, outputs to a tar file, and recursively processes any nested manifests or blobs -func (rc *RegClient) imageExportDescriptor(ctx context.Context, ref ref.Ref, desc types.Descriptor, twd *tarWriteData) error { +func (rc *RegClient) imageExportDescriptor(ctx context.Context, r ref.Ref, desc descriptor.Descriptor, twd *tarWriteData) error { tarFilename := tarOCILayoutDescPath(desc) if twd.files[tarFilename] { // blob has already been imported into tar, skip return nil } switch desc.MediaType { - case types.MediaTypeDocker1Manifest, types.MediaTypeDocker1ManifestSigned, types.MediaTypeDocker2Manifest, types.MediaTypeOCI1Manifest: + case mediatype.Docker1Manifest, mediatype.Docker1ManifestSigned, mediatype.Docker2Manifest, mediatype.OCI1Manifest: // Handle single platform manifests // retrieve manifest - m, err := rc.ManifestGet(ctx, ref, WithManifestDesc(desc)) + m, err := rc.ManifestGet(ctx, r, WithManifestDesc(desc)) if err != nil { return err } mi, ok := m.(manifest.Imager) if !ok { - return fmt.Errorf("manifest 
doesn't support image methods%.0w", types.ErrUnsupportedMediaType) + return fmt.Errorf("manifest doesn't support image methods%.0w", errs.ErrUnsupportedMediaType) } // write manifest body by digest mBody, err := m.RawBody() @@ -828,11 +1115,11 @@ func (rc *RegClient) imageExportDescriptor(ctx context.Context, ref ref.Ref, des // add config confD, err := mi.GetConfig() // ignore unsupported media type errors - if err != nil && !errors.Is(err, types.ErrUnsupportedMediaType) { + if err != nil && !errors.Is(err, errs.ErrUnsupportedMediaType) { return err } if err == nil { - err = rc.imageExportDescriptor(ctx, ref, confD, twd) + err = rc.imageExportDescriptor(ctx, r, confD, twd) if err != nil { return err } @@ -841,28 +1128,28 @@ func (rc *RegClient) imageExportDescriptor(ctx context.Context, ref ref.Ref, des // loop over layers layerDL, err := mi.GetLayers() // ignore unsupported media type errors - if err != nil && !errors.Is(err, types.ErrUnsupportedMediaType) { + if err != nil && !errors.Is(err, errs.ErrUnsupportedMediaType) { return err } if err == nil { for _, layerD := range layerDL { - err = rc.imageExportDescriptor(ctx, ref, layerD, twd) + err = rc.imageExportDescriptor(ctx, r, layerD, twd) if err != nil { return err } } } - case types.MediaTypeDocker2ManifestList, types.MediaTypeOCI1ManifestList: + case mediatype.Docker2ManifestList, mediatype.OCI1ManifestList: // handle OCI index and Docker manifest list // retrieve manifest - m, err := rc.ManifestGet(ctx, ref, WithManifestDesc(desc)) + m, err := rc.ManifestGet(ctx, r, WithManifestDesc(desc)) if err != nil { return err } mi, ok := m.(manifest.Indexer) if !ok { - return fmt.Errorf("manifest doesn't support index methods%.0w", types.ErrUnsupportedMediaType) + return fmt.Errorf("manifest doesn't support index methods%.0w", errs.ErrUnsupportedMediaType) } // write manifest body by digest mBody, err := m.RawBody() @@ -883,7 +1170,7 @@ func (rc *RegClient) imageExportDescriptor(ctx context.Context, ref ref.Ref, des return err } for _, md := range mdl { - err = rc.imageExportDescriptor(ctx, ref, md, twd) + err = rc.imageExportDescriptor(ctx, r, md, twd) if err != nil { return err } @@ -891,7 +1178,7 @@ func (rc *RegClient) imageExportDescriptor(ctx context.Context, ref ref.Ref, des default: // get blob - blobR, err := rc.BlobGet(ctx, ref, desc) + blobR, err := rc.BlobGet(ctx, r, desc) if err != nil { return err } @@ -913,26 +1200,36 @@ func (rc *RegClient) imageExportDescriptor(ctx context.Context, ref ref.Ref, des return nil } -// ImageImport pushes an image from a tar file to a registry -func (rc *RegClient) ImageImport(ctx context.Context, ref ref.Ref, rs io.ReadSeeker) error { +// ImageImport pushes an image from a tar file (ImageExport) to a registry. 
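A round-trip sketch of the two calls (illustrative only; the references and file name are hypothetical). The export below is uncompressed so the same file can be re-imported directly; ImageWithExportCompress would wrap the output stream in gzip, as the export code above shows:

    package main

    import (
        "context"
        "log"
        "os"

        "github.com/regclient/regclient"
        "github.com/regclient/regclient/types/ref"
    )

    func main() {
        ctx := context.Background()
        rc := regclient.New(regclient.WithDockerCreds())
        src, err := ref.New("registry.example.com/app:v1") // hypothetical source
        if err != nil {
            log.Fatal(err)
        }
        out, err := os.Create("app.tar")
        if err != nil {
            log.Fatal(err)
        }
        if err := rc.ImageExport(ctx, src, out); err != nil {
            log.Fatal(err)
        }
        out.Close()
        in, err := os.Open("app.tar")
        if err != nil {
            log.Fatal(err)
        }
        defer in.Close()
        tgt, err := ref.New("localhost:5000/app:v1") // hypothetical target
        if err != nil {
            log.Fatal(err)
        }
        if err := rc.ImageImport(ctx, tgt, in); err != nil {
            log.Fatal(err)
        }
    }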
+func (rc *RegClient) ImageImport(ctx context.Context, r ref.Ref, rs io.ReadSeeker, opts ...ImageOpts) error { + if !r.IsSetRepo() { + return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + var opt imageOpt + for _, optFn := range opts { + optFn(&opt) + } + trd := &tarReadData{ + name: opt.importName, handlers: map[string]tarFileHandler{}, + links: map[string][]string{}, processed: map[string]bool{}, finish: []func() error{}, manifests: map[digest.Digest]manifest.Manifest{}, } // add handler for oci-layout, index.json, and manifest.json - rc.imageImportOCIAddHandler(ctx, ref, trd) + rc.imageImportOCIAddHandler(ctx, r, trd) rc.imageImportDockerAddHandler(trd) // process tar file looking for oci-layout and index.json, load manifests/blobs on success err := trd.tarReadAll(rs) - if err != nil && errors.Is(err, types.ErrNotFound) && trd.dockerManifestFound { + if err != nil && errors.Is(err, errs.ErrNotFound) && trd.dockerManifestFound { // import failed but manifest.json found, fall back to manifest.json processing // add handlers for the docker manifest layers - rc.imageImportDockerAddLayerHandlers(ctx, ref, trd) + rc.imageImportDockerAddLayerHandlers(ctx, r, trd) // reprocess the tar looking for manifest.json files err = trd.tarReadAll(rs) if err != nil { @@ -943,7 +1240,7 @@ func (rc *RegClient) ImageImport(ctx context.Context, ref ref.Ref, rs io.ReadSee if err != nil { return err } - err = rc.ManifestPut(ctx, ref, m) + err = rc.ManifestPut(ctx, r, m) if err != nil { return err } @@ -952,7 +1249,7 @@ func (rc *RegClient) ImageImport(ctx context.Context, ref ref.Ref, rs io.ReadSee return err } else { // successful load of OCI blobs, now push manifest and tag - err = rc.imageImportOCIPushManifests(ctx, ref, trd) + err = rc.imageImportOCIPushManifests(ctx, r, trd) if err != nil { return err } @@ -960,21 +1257,21 @@ func (rc *RegClient) ImageImport(ctx context.Context, ref ref.Ref, rs io.ReadSee return nil } -func (rc *RegClient) imageImportBlob(ctx context.Context, ref ref.Ref, desc types.Descriptor, trd *tarReadData) error { +func (rc *RegClient) imageImportBlob(ctx context.Context, r ref.Ref, desc descriptor.Descriptor, trd *tarReadData) error { // skip if blob already exists - _, err := rc.BlobHead(ctx, ref, desc) + _, err := rc.BlobHead(ctx, r, desc) if err == nil { return nil } // upload blob - _, err = rc.BlobPut(ctx, ref, desc, trd.tr) + _, err = rc.BlobPut(ctx, r, desc, trd.tr) if err != nil { return err } return nil } -// imageImportDockerAddHandler processes tar files generated by docker +// imageImportDockerAddHandler processes tar files generated by docker. func (rc *RegClient) imageImportDockerAddHandler(trd *tarReadData) { trd.handlers[dockerManifestFilename] = func(header *tar.Header, trd *tarReadData) error { err := trd.tarReadFileJSON(&trd.dockerManifestList) @@ -986,35 +1283,61 @@ func (rc *RegClient) imageImportDockerAddHandler(trd *tarReadData) { } } -// imageImportDockerAddLayerHandlers imports the docker layers when OCI import fails and docker manifest found -func (rc *RegClient) imageImportDockerAddLayerHandlers(ctx context.Context, ref ref.Ref, trd *tarReadData) { +// imageImportDockerAddLayerHandlers imports the docker layers when OCI import fails and docker manifest found. 
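The handlers installed below match the requested import name (trd.name) against each entry's RepoTags, so a caller can pick one image out of a multi-image docker-save tar. A sketch of that selection (hypothetical names, not part of this change):

    package main

    import (
        "context"
        "log"
        "os"

        "github.com/regclient/regclient"
        "github.com/regclient/regclient/types/ref"
    )

    func main() {
        ctx := context.Background()
        rc := regclient.New(regclient.WithDockerCreds())
        in, err := os.Open("images.tar") // hypothetical: docker save app:v1 db:v2 > images.tar
        if err != nil {
            log.Fatal(err)
        }
        defer in.Close()
        tgt, err := ref.New("localhost:5000/app:v1") // hypothetical target
        if err != nil {
            log.Fatal(err)
        }
        // select the tar entry whose RepoTags include app:v1
        err = rc.ImageImport(ctx, tgt, in, regclient.ImageWithImportName("app:v1"))
        if err != nil {
            log.Fatal(err)
        }
    }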
+func (rc *RegClient) imageImportDockerAddLayerHandlers(ctx context.Context, r ref.Ref, trd *tarReadData) { // remove handlers for OCI delete(trd.handlers, ociLayoutFilename) delete(trd.handlers, ociIndexFilename) + index := 0 + if trd.name != "" { + found := false + tags := []string{} + for i, entry := range trd.dockerManifestList { + tags = append(tags, entry.RepoTags...) + for _, tag := range entry.RepoTags { + if tag == trd.name { + index = i + found = true + break + } + } + if found { + break + } + } + if !found { + rc.log.WithFields(logrus.Fields{ + "tags": tags, + "name": trd.name, + }).Warn("Could not find requested name") + return + } + } + // make a docker v2 manifest from first json array entry (can only tag one image) trd.dockerManifest.SchemaVersion = 2 - trd.dockerManifest.MediaType = types.MediaTypeDocker2Manifest - trd.dockerManifest.Layers = make([]types.Descriptor, len(trd.dockerManifestList[0].Layers)) + trd.dockerManifest.MediaType = mediatype.Docker2Manifest + trd.dockerManifest.Layers = make([]descriptor.Descriptor, len(trd.dockerManifestList[index].Layers)) // add handler for config - trd.handlers[filepath.Clean(trd.dockerManifestList[0].Config)] = func(header *tar.Header, trd *tarReadData) error { + trd.handlers[filepath.Clean(trd.dockerManifestList[index].Config)] = func(header *tar.Header, trd *tarReadData) error { // upload blob, digest is unknown - d, err := rc.BlobPut(ctx, ref, types.Descriptor{Size: header.Size}, trd.tr) + d, err := rc.BlobPut(ctx, r, descriptor.Descriptor{Size: header.Size}, trd.tr) if err != nil { return err } // save the resulting descriptor to the manifest - if od, ok := trd.dockerManifestList[0].LayerSources[d.Digest]; ok { + if od, ok := trd.dockerManifestList[index].LayerSources[d.Digest]; ok { trd.dockerManifest.Config = od } else { - d.MediaType = types.MediaTypeDocker2ImageConfig + d.MediaType = mediatype.Docker2ImageConfig trd.dockerManifest.Config = d } return nil } // add handlers for each layer - for i, layerFile := range trd.dockerManifestList[0].Layers { + for i, layerFile := range trd.dockerManifestList[index].Layers { func(i int) { trd.handlers[filepath.Clean(layerFile)] = func(header *tar.Header, trd *tarReadData) error { // ensure blob is compressed with gzip to match media type @@ -1023,15 +1346,15 @@ func (rc *RegClient) imageImportDockerAddLayerHandlers(ctx context.Context, ref return err } // upload blob, digest and size is unknown - d, err := rc.BlobPut(ctx, ref, types.Descriptor{}, gzipR) + d, err := rc.BlobPut(ctx, r, descriptor.Descriptor{}, gzipR) if err != nil { return err } // save the resulting descriptor in the appropriate layer - if od, ok := trd.dockerManifestList[0].LayerSources[d.Digest]; ok { + if od, ok := trd.dockerManifestList[index].LayerSources[d.Digest]; ok { trd.dockerManifest.Layers[i] = od } else { - d.MediaType = types.MediaTypeDocker2LayerGzip + d.MediaType = mediatype.Docker2LayerGzip trd.dockerManifest.Layers[i] = d } return nil @@ -1041,8 +1364,8 @@ func (rc *RegClient) imageImportDockerAddLayerHandlers(ctx context.Context, ref trd.handleAdded = true } -// imageImportOCIAddHandler adds handlers for oci-layout and index.json found in OCI layout tar files -func (rc *RegClient) imageImportOCIAddHandler(ctx context.Context, ref ref.Ref, trd *tarReadData) { +// imageImportOCIAddHandler adds handlers for oci-layout and index.json found in OCI layout tar files. 
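For reference, a sketch of the index.json shape these OCI handlers look for: a single descriptor whose ref.name annotation carries the tag (digest, size, and names below are placeholders):

    package main

    import (
        "encoding/json"
        "fmt"

        digest "github.com/opencontainers/go-digest"

        "github.com/regclient/regclient/types/descriptor"
        "github.com/regclient/regclient/types/mediatype"
        v1 "github.com/regclient/regclient/types/oci/v1"
    )

    func main() {
        idx := v1.Index{
            Versioned: v1.IndexSchemaVersion,
            Manifests: []descriptor.Descriptor{{
                MediaType: mediatype.OCI1ManifestList,
                Size:      1234, // placeholder
                // digest of the empty blob, used here as a placeholder
                Digest: digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"),
                Annotations: map[string]string{
                    "org.opencontainers.image.ref.name": "registry.example.com/app:v1",
                },
            }},
        }
        b, _ := json.MarshalIndent(idx, "", "  ")
        fmt.Println(string(b))
    }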
+func (rc *RegClient) imageImportOCIAddHandler(ctx context.Context, r ref.Ref, trd *tarReadData) { // add handler for oci-layout, index.json, and manifest.json var err error var foundLayout, foundIndex bool @@ -1058,7 +1381,7 @@ func (rc *RegClient) imageImportOCIAddHandler(ctx context.Context, ref ref.Ref, } // start recursively processing manifests starting with the index // there's no need to push the index.json by digest, it will be pushed by tag if needed - err = rc.imageImportOCIHandleManifest(ctx, ref, trd.ociManifest, trd, false, false) + err = rc.imageImportOCIHandleManifest(ctx, r, trd.ociManifest, trd, false, false) if err != nil { return err } @@ -1102,12 +1425,12 @@ func (rc *RegClient) imageImportOCIAddHandler(ctx context.Context, ref ref.Ref, } } -// imageImportOCIHandleManifest recursively processes index and manifest entries from an OCI layout tar -func (rc *RegClient) imageImportOCIHandleManifest(ctx context.Context, ref ref.Ref, m manifest.Manifest, trd *tarReadData, push bool, child bool) error { +// imageImportOCIHandleManifest recursively processes index and manifest entries from an OCI layout tar. +func (rc *RegClient) imageImportOCIHandleManifest(ctx context.Context, r ref.Ref, m manifest.Manifest, trd *tarReadData, push bool, child bool) error { // cache the manifest to avoid needing to pull again later, this is used if index.json is a wrapper around some other manifest trd.manifests[m.GetDescriptor().Digest] = m - handleManifest := func(d types.Descriptor, child bool) { + handleManifest := func(d descriptor.Descriptor, child bool) { filename := tarOCILayoutDescPath(d) if !trd.processed[filename] && trd.handlers[filename] == nil { trd.handlers[filename] = func(header *tar.Header, trd *tarReadData) error { @@ -1116,27 +1439,27 @@ func (rc *RegClient) imageImportOCIHandleManifest(ctx context.Context, ref ref.R return err } switch d.MediaType { - case types.MediaTypeDocker1Manifest, types.MediaTypeDocker1ManifestSigned, - types.MediaTypeDocker2Manifest, types.MediaTypeDocker2ManifestList, - types.MediaTypeOCI1Manifest, types.MediaTypeOCI1ManifestList: + case mediatype.Docker1Manifest, mediatype.Docker1ManifestSigned, + mediatype.Docker2Manifest, mediatype.Docker2ManifestList, + mediatype.OCI1Manifest, mediatype.OCI1ManifestList: // known manifest media types md, err := manifest.New(manifest.WithDesc(d), manifest.WithRaw(b)) if err != nil { return err } - return rc.imageImportOCIHandleManifest(ctx, ref, md, trd, true, child) - case types.MediaTypeDocker2ImageConfig, types.MediaTypeOCI1ImageConfig, - types.MediaTypeDocker2LayerGzip, types.MediaTypeOCI1Layer, types.MediaTypeOCI1LayerGzip, - types.MediaTypeBuildkitCacheConfig: + return rc.imageImportOCIHandleManifest(ctx, r, md, trd, true, child) + case mediatype.Docker2ImageConfig, mediatype.OCI1ImageConfig, + mediatype.Docker2LayerGzip, mediatype.OCI1Layer, mediatype.OCI1LayerGzip, + mediatype.BuildkitCacheConfig: // known blob media types - return rc.imageImportBlob(ctx, ref, d, trd) + return rc.imageImportBlob(ctx, r, d, trd) default: // attempt manifest import, fall back to blob import md, err := manifest.New(manifest.WithDesc(d), manifest.WithRaw(b)) if err == nil { - return rc.imageImportOCIHandleManifest(ctx, ref, md, trd, true, child) + return rc.imageImportOCIHandleManifest(ctx, r, md, trd, true, child) } - return rc.imageImportBlob(ctx, ref, d, trd) + return rc.imageImportBlob(ctx, r, d, trd) } } } @@ -1145,7 +1468,7 @@ func (rc *RegClient) imageImportOCIHandleManifest(ctx context.Context, ref ref.R if !push { 
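+		// this is the root index: select a single manifest to tag, preferring
+		// an explicit digest, then the requested import name, then the
+		// ref.name annotation matching the tag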
mi, ok := m.(manifest.Indexer) if !ok { - return fmt.Errorf("manifest doesn't support image methods%.0w", types.ErrUnsupportedMediaType) + return fmt.Errorf("manifest doesn't support image methods%.0w", errs.ErrUnsupportedMediaType) } // for root index, add handler for matching reference (or only reference) dl, err := mi.GetManifestList() @@ -1153,40 +1476,50 @@ func (rc *RegClient) imageImportOCIHandleManifest(ctx context.Context, ref ref.R return err } // locate the digest in the index - var d types.Descriptor + var d descriptor.Descriptor if len(dl) == 1 { d = dl[0] - } else if ref.Digest != "" { - d.Digest = digest.Digest(ref.Digest) + } else if r.Digest != "" { + d.Digest = digest.Digest(r.Digest) + } else if trd.name != "" { + for _, cur := range dl { + if cur.Annotations[annotationRefName] == trd.name { + d = cur + break + } + } + if d.Digest.String() == "" { + return fmt.Errorf("could not find requested tag in index.json, %s", trd.name) + } } else { - if ref.Tag == "" { - ref.Tag = "latest" + if r.Tag == "" { + r.Tag = "latest" } // if more than one digest is in the index, use the first matching tag for _, cur := range dl { - if cur.Annotations[annotationRefName] == ref.Tag { + if cur.Annotations[annotationRefName] == r.Tag { d = cur break } } - } - if d.Digest.String() == "" { - return fmt.Errorf("could not find requested tag in index.json, %s", ref.Tag) + if d.Digest.String() == "" { + return fmt.Errorf("could not find requested tag in index.json, %s", r.Tag) + } } handleManifest(d, false) // add a finish step to tag the selected digest trd.finish = append(trd.finish, func() error { mRef, ok := trd.manifests[d.Digest] if !ok { - return fmt.Errorf("could not find manifest to tag, ref: %s, digest: %s", ref.CommonName(), d.Digest) + return fmt.Errorf("could not find manifest to tag, ref: %s, digest: %s", r.CommonName(), d.Digest) } - return rc.ManifestPut(ctx, ref, mRef) + return rc.ManifestPut(ctx, r, mRef) }) } else if m.IsList() { // for index/manifest lists, add handlers for each embedded manifest mi, ok := m.(manifest.Indexer) if !ok { - return fmt.Errorf("manifest doesn't support index methods%.0w", types.ErrUnsupportedMediaType) + return fmt.Errorf("manifest doesn't support index methods%.0w", errs.ErrUnsupportedMediaType) } dl, err := mi.GetManifestList() if err != nil { @@ -1199,16 +1532,16 @@ func (rc *RegClient) imageImportOCIHandleManifest(ctx context.Context, ref ref.R // else if a single image/manifest mi, ok := m.(manifest.Imager) if !ok { - return fmt.Errorf("manifest doesn't support image methods%.0w", types.ErrUnsupportedMediaType) + return fmt.Errorf("manifest doesn't support image methods%.0w", errs.ErrUnsupportedMediaType) } // add handler for the config descriptor if it's defined cd, err := mi.GetConfig() if err == nil { filename := tarOCILayoutDescPath(cd) if !trd.processed[filename] && trd.handlers[filename] == nil { - func(cd types.Descriptor) { + func(cd descriptor.Descriptor) { trd.handlers[filename] = func(header *tar.Header, trd *tarReadData) error { - return rc.imageImportBlob(ctx, ref, cd, trd) + return rc.imageImportBlob(ctx, r, cd, trd) } }(cd) } @@ -1221,9 +1554,9 @@ func (rc *RegClient) imageImportOCIHandleManifest(ctx context.Context, ref ref.R for _, d := range layers { filename := tarOCILayoutDescPath(d) if !trd.processed[filename] && trd.handlers[filename] == nil { - func(d types.Descriptor) { + func(d descriptor.Descriptor) { trd.handlers[filename] = func(header *tar.Header, trd *tarReadData) error { - return rc.imageImportBlob(ctx, ref, d, 
trd) + return rc.imageImportBlob(ctx, r, d, trd) } }(d) } @@ -1232,7 +1565,7 @@ func (rc *RegClient) imageImportOCIHandleManifest(ctx context.Context, ref ref.R // add a finish func to push the manifest, this gets skipped for the index.json if push { trd.finish = append(trd.finish, func() error { - mRef := ref + mRef := r mRef.Digest = string(m.GetDescriptor().Digest) _, err := rc.ManifestHead(ctx, mRef) if err == nil { @@ -1249,8 +1582,8 @@ func (rc *RegClient) imageImportOCIHandleManifest(ctx context.Context, ref ref.R return nil } -// imageImportOCIPushManifests uploads manifests after OCI blobs were successfully loaded -func (rc *RegClient) imageImportOCIPushManifests(ctx context.Context, ref ref.Ref, trd *tarReadData) error { +// imageImportOCIPushManifests uploads manifests after OCI blobs were successfully loaded. +func (rc *RegClient) imageImportOCIPushManifests(_ context.Context, _ ref.Ref, trd *tarReadData) error { // run finish handlers in reverse order to upload nested manifests for i := len(trd.finish) - 1; i >= 0; i-- { err := trd.finish[i]() @@ -1286,8 +1619,8 @@ func imagePlatformInList(target *platform.Platform, list []string) (bool, error) return false, nil } -// tarReadAll processes the tar file in a loop looking for matching filenames in the list of handlers -// handlers for filenames are added at the top level, and by manifest imports +// tarReadAll processes the tar file in a loop looking for matching filenames in the list of handlers. +// Handlers for filenames are added at the top level, and by manifest imports. func (trd *tarReadData) tarReadAll(rs io.ReadSeeker) error { // return immediately if nothing to do if len(trd.handlers) == 0 { @@ -1299,7 +1632,11 @@ func (trd *tarReadData) tarReadAll(rs io.ReadSeeker) error { if err != nil { return err } - trd.tr = tar.NewReader(rs) + dr, err := archive.Decompress(rs) + if err != nil { + return err + } + trd.tr = tar.NewReader(dr) trd.handleAdded = false // loop over each entry of the tar file for { @@ -1310,28 +1647,89 @@ func (trd *tarReadData) tarReadAll(rs io.ReadSeeker) error { return err } name := filepath.Clean(header.Name) - // if a handler exists, run it, remove handler, and check if we are done - if trd.handlers[name] != nil { - err = trd.handlers[name](header, trd) + // track symlinks + if header.Typeflag == tar.TypeSymlink || header.Typeflag == tar.TypeLink { + // normalize target relative to root of tar + target := header.Linkname + if !filepath.IsAbs(target) { + target, err = filepath.Rel(filepath.Dir(name), target) + if err != nil { + return err + } + } + target = filepath.Clean("/" + target)[1:] + // track and set handleAdded if an existing handler points to the target + if trd.linkAdd(name, target) && !trd.handleAdded { + list, err := trd.linkList(target) + if err != nil { + return err + } + for _, src := range append(list, name) { + if trd.handlers[src] != nil { + trd.handleAdded = true + } + } + } + } else { + // loop through filename and symlinks to file in search of handlers + list, err := trd.linkList(name) if err != nil { return err } - delete(trd.handlers, name) - trd.processed[name] = true - // return if last handler processed - if len(trd.handlers) == 0 { - return nil + list = append(list, name) + trdUsed := false + for _, entry := range list { + if trd.handlers[entry] != nil { + // trd cannot be reused, force the loop to run again + if trdUsed { + trd.handleAdded = true + break + } + trdUsed = true + // run handler + err = trd.handlers[entry](header, trd) + if err != nil { + return err + } + 
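+					// the handler consumed this tar entry; drop it and mark the
+					// file processed so each handler runs at most once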
delete(trd.handlers, entry) + trd.processed[entry] = true + // return if last handler processed + if len(trd.handlers) == 0 { + return nil + } + } } } } // if entire file read without adding a new handler, fail if !trd.handleAdded { - return fmt.Errorf("unable to read all files from tar: %w", types.ErrNotFound) + return fmt.Errorf("unable to read all files from tar: %w", errs.ErrNotFound) + } + } +} + +func (trd *tarReadData) linkAdd(src, tgt string) bool { + for _, entry := range trd.links[tgt] { + if entry == src { + return false + } + } + trd.links[tgt] = append(trd.links[tgt], src) + return true +} + +func (trd *tarReadData) linkList(tgt string) ([]string, error) { + list := trd.links[tgt] + for _, entry := range list { + if entry == tgt { + return nil, fmt.Errorf("symlink loop encountered for %s", tgt) } + list = append(list, trd.links[entry]...) } + return list, nil } -// tarReadFileJSON reads the current tar entry and unmarshals json into provided interface +// tarReadFileJSON reads the current tar entry and unmarshals json into provided interface. func (trd *tarReadData) tarReadFileJSON(data interface{}) error { b, err := io.ReadAll(trd.tr) if err != nil { @@ -1398,6 +1796,6 @@ func (td *tarWriteData) tarWriteFileJSON(filename string, data interface{}) erro return nil } -func tarOCILayoutDescPath(d types.Descriptor) string { +func tarOCILayoutDescPath(d descriptor.Descriptor) string { return filepath.Clean(fmt.Sprintf("blobs/%s/%s", d.Digest.Algorithm(), d.Digest.Encoded())) } diff --git a/vendor/github.com/regclient/regclient/internal/auth/auth.go b/vendor/github.com/regclient/regclient/internal/auth/auth.go index de5fab5c3..3da146183 100644 --- a/vendor/github.com/regclient/regclient/internal/auth/auth.go +++ b/vendor/github.com/regclient/regclient/internal/auth/auth.go @@ -15,6 +15,8 @@ import ( "time" "github.com/sirupsen/logrus" + + "github.com/regclient/regclient/types/errs" ) type charLU byte @@ -488,7 +490,7 @@ func (b *BasicHandler) ProcessChallenge(c Challenge) error { func (b *BasicHandler) GenerateAuth() (string, error) { cred := b.credsFn(b.host) if cred.User == "" || cred.Password == "" { - return "", ErrNotFound + return "", fmt.Errorf("no credentials available: %w", errs.ErrHTTPUnauthorized) } auth := base64.StdEncoding.EncodeToString([]byte(cred.User + ":" + cred.Password)) return fmt.Sprintf("Basic %s", auth), nil @@ -606,14 +608,14 @@ func (b *BearerHandler) GenerateAuth() (string, error) { if err := b.tryPost(); err == nil { return fmt.Sprintf("Bearer %s", b.token.Token), nil } else if err != ErrUnauthorized { - return "", err + return "", fmt.Errorf("failed to request auth token (post): %w%.0w", err, errs.ErrHTTPUnauthorized) } // attempt a get (with basic auth if user/pass available) if err := b.tryGet(); err == nil { return fmt.Sprintf("Bearer %s", b.token.Token), nil } else if err != ErrUnauthorized { - return "", err + return "", fmt.Errorf("failed to request auth token (get): %w%.0w", err, errs.ErrHTTPUnauthorized) } return "", ErrUnauthorized diff --git a/vendor/github.com/regclient/regclient/internal/auth/error.go b/vendor/github.com/regclient/regclient/internal/auth/error.go index 72572f57c..2ba7e3b82 100644 --- a/vendor/github.com/regclient/regclient/internal/auth/error.go +++ b/vendor/github.com/regclient/regclient/internal/auth/error.go @@ -1,24 +1,40 @@ package auth import ( - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/errs" ) var ( // ErrEmptyChallenge indicates an issue with the received challenge in the 
WWW-Authenticate header
-	ErrEmptyChallenge = types.ErrEmptyChallenge
+	//
+	// Deprecated: replace with [errs.ErrEmptyChallenge].
+	ErrEmptyChallenge = errs.ErrEmptyChallenge
 	// ErrInvalidChallenge indicates an issue with the received challenge in the WWW-Authenticate header
-	ErrInvalidChallenge = types.ErrInvalidChallenge
+	//
+	// Deprecated: replace with [errs.ErrInvalidChallenge].
+	ErrInvalidChallenge = errs.ErrInvalidChallenge
 	// ErrNoNewChallenge indicates a challenge update did not result in any change
-	ErrNoNewChallenge = types.ErrNoNewChallenge
+	//
+	// Deprecated: replace with [errs.ErrNoNewChallenge].
+	ErrNoNewChallenge = errs.ErrNoNewChallenge
 	// ErrNotFound indicates no credentials found for basic auth
-	ErrNotFound = types.ErrNotFound
+	//
+	// Deprecated: replace with [errs.ErrNotFound].
+	ErrNotFound = errs.ErrNotFound
 	// ErrNotImplemented returned when method has not been implemented yet
-	ErrNotImplemented = types.ErrNotImplemented
+	//
+	// Deprecated: replace with [errs.ErrNotImplemented].
+	ErrNotImplemented = errs.ErrNotImplemented
 	// ErrParseFailure indicates the WWW-Authenticate header could not be parsed
-	ErrParseFailure = types.ErrParsingFailed
+	//
+	// Deprecated: replace with [errs.ErrParsingFailed].
+	ErrParseFailure = errs.ErrParsingFailed
 	// ErrUnauthorized request was not authorized
-	ErrUnauthorized = types.ErrHTTPUnauthorized
+	//
+	// Deprecated: replace with [errs.ErrHTTPUnauthorized].
+	ErrUnauthorized = errs.ErrHTTPUnauthorized
 	// ErrUnsupported indicates the request was unsupported
-	ErrUnsupported = types.ErrUnsupported
+	//
+	// Deprecated: replace with [errs.ErrUnsupported].
+	ErrUnsupported = errs.ErrUnsupported
 )
diff --git a/vendor/github.com/regclient/regclient/internal/cache/cache.go b/vendor/github.com/regclient/regclient/internal/cache/cache.go
new file mode 100644
index 000000000..4bc4523e4
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/internal/cache/cache.go
@@ -0,0 +1,182 @@
+//go:build go1.18
+// +build go1.18
+
+// Package cache is used to store values with limits.
+// Items are automatically pruned when too many entries are stored, or values become stale.
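+//
+// A minimal usage sketch (the key/value types and limits are illustrative):
+//
+//	c := cache.New[string, int](cache.WithCount(500), cache.WithAge(time.Minute))
+//	c.Set("hello", 42)
+//	v, err := c.Get("hello") // errs.ErrNotFound once expired or pruned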
+package cache + +import ( + "sort" + "sync" + "time" + + "github.com/regclient/regclient/types/errs" +) + +type Cache[k comparable, v any] struct { + mu sync.Mutex + minAge time.Duration + maxAge time.Duration + minCount int + maxCount int + timer *time.Timer + entries map[k]*Entry[v] +} + +type Entry[v any] struct { + used time.Time + value v +} + +type sortKeys[k comparable] struct { + keys []k + lessFn func(a, b k) bool +} + +type conf struct { + minAge time.Duration + maxCount int +} + +type cacheOpts func(*conf) + +func WithAge(age time.Duration) cacheOpts { + return func(c *conf) { + c.minAge = age + } +} + +func WithCount(count int) cacheOpts { + return func(c *conf) { + c.maxCount = count + } +} + +func New[k comparable, v any](opts ...cacheOpts) Cache[k, v] { + c := conf{} + for _, opt := range opts { + opt(&c) + } + maxAge := c.minAge + (c.minAge / 10) + minCount := 0 + if c.maxCount > 0 { + minCount = int(float64(c.maxCount) * 0.9) + } + return Cache[k, v]{ + minAge: c.minAge, + maxAge: maxAge, + minCount: minCount, + maxCount: c.maxCount, + entries: map[k]*Entry[v]{}, + } +} + +func (c *Cache[k, v]) Delete(key k) { + if c == nil { + return + } + c.mu.Lock() + defer c.mu.Unlock() + delete(c.entries, key) + if len(c.entries) == 0 && c.timer != nil { + c.timer.Stop() + c.timer = nil + } +} + +func (c *Cache[k, v]) Set(key k, val v) { + if c == nil { + return + } + c.mu.Lock() + defer c.mu.Unlock() + c.entries[key] = &Entry[v]{ + used: time.Now(), + value: val, + } + if len(c.entries) > c.maxCount { + c.pruneLocked() + } else if c.timer == nil { + // prune resets the timer, so this is only needed if the prune wasn't triggered + c.timer = time.AfterFunc(c.maxAge, c.prune) + } +} + +func (c *Cache[k, v]) Get(key k) (v, error) { + if c == nil { + var val v + return val, errs.ErrNotFound + } + c.mu.Lock() + defer c.mu.Unlock() + if e, ok := c.entries[key]; ok { + if e.used.Add(c.minAge).Before(time.Now()) { + // entry expired + go c.prune() + } else { + c.entries[key].used = time.Now() + return e.value, nil + } + } + var val v + return val, errs.ErrNotFound +} + +func (c *Cache[k, v]) prune() { + c.mu.Lock() + defer c.mu.Unlock() + c.pruneLocked() +} + +func (c *Cache[k, v]) pruneLocked() { + // sort key list by last used date + keyList := make([]k, 0, len(c.entries)) + for key := range c.entries { + keyList = append(keyList, key) + } + sk := sortKeys[k]{ + keys: keyList, + lessFn: func(a, b k) bool { + return c.entries[a].used.Before(c.entries[b].used) + }, + } + sort.Sort(&sk) + // prune entries + now := time.Now() + cutoff := now.Add(c.minAge * -1) + nextTime := now + delCount := len(keyList) - c.minCount + for i, key := range keyList { + if i < delCount || c.entries[key].used.Before(cutoff) { + delete(c.entries, key) + } else { + nextTime = c.entries[key].used + break + } + } + // set next timer + if len(c.entries) > 0 { + dur := nextTime.Sub(now) + c.maxAge + if c.timer == nil { + // this shouldn't be possible + c.timer = time.AfterFunc(dur, c.prune) + } else { + c.timer.Reset(dur) + } + } else if c.timer != nil { + c.timer.Stop() + c.timer = nil + } +} + +func (sk *sortKeys[k]) Len() int { + return len(sk.keys) +} + +func (sk *sortKeys[k]) Less(i, j int) bool { + return sk.lessFn(sk.keys[i], sk.keys[j]) +} + +func (sk *sortKeys[k]) Swap(i, j int) { + sk.keys[i], sk.keys[j] = sk.keys[j], sk.keys[i] +} diff --git a/vendor/github.com/regclient/regclient/internal/conffile/conffile.go b/vendor/github.com/regclient/regclient/internal/conffile/conffile.go index 7dd3dcb9a..a70370ed4 
100644 --- a/vendor/github.com/regclient/regclient/internal/conffile/conffile.go +++ b/vendor/github.com/regclient/regclient/internal/conffile/conffile.go @@ -106,16 +106,17 @@ func (f *File) Write(rdr io.Reader) error { } tmpName := tmpStat.Name() tmpFullname := filepath.Join(dir, tmpName) - defer func() { - f.fs.Remove(tmpFullname) - }() + defer f.fs.Remove(tmpFullname) // copy from rdr to temp file _, err = io.Copy(tmp, rdr) - tmp.Close() + errC := tmp.Close() if err != nil { return fmt.Errorf("failed to write config: %w", err) } + if errC != nil { + return fmt.Errorf("failed to close config: %w", errC) + } // adjust file ownership/permissions mode := os.FileMode(0600) diff --git a/vendor/github.com/regclient/regclient/internal/httplink/httplink.go b/vendor/github.com/regclient/regclient/internal/httplink/httplink.go index 777e656c0..b747cfd2b 100644 --- a/vendor/github.com/regclient/regclient/internal/httplink/httplink.go +++ b/vendor/github.com/regclient/regclient/internal/httplink/httplink.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/errs" ) type Links []Link @@ -75,7 +75,7 @@ func Parse(headers []string) (Links, error) { // noop } else { // unknown character - return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, types.ErrParsingFailed) + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) } case "uri": // parse tokens until space or comma @@ -90,7 +90,7 @@ func Parse(headers []string) (Links, error) { endLink() } else { // unknown character - return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, types.ErrParsingFailed) + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) } case "uriQuoted": // parse tokens until quote @@ -109,7 +109,7 @@ func Parse(headers []string) (Links, error) { // noop } else { // unknown character - return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, types.ErrParsingFailed) + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) } case "parmName": if len(pnb) > 0 && b == '=' { @@ -122,14 +122,14 @@ func Parse(headers []string) (Links, error) { // noop } else { // unknown character - return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, types.ErrParsingFailed) + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) } case "parmNameStar": if b == '=' { state = "parmValue" } else { // unknown character - return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, types.ErrParsingFailed) + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) } case "parmValue": if len(pvb) == 0 { @@ -139,7 +139,7 @@ func Parse(headers []string) (Links, error) { state = "parmValueQuoted" } else { // unknown character - return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, types.ErrParsingFailed) + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) } } else { if charLUs[b]&isToken != 0 { @@ -156,7 +156,7 @@ func Parse(headers []string) (Links, error) { endLink() } else { // unknown character - return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, types.ErrParsingFailed) + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) } } case "parmValueQuoted": 
@@ -178,7 +178,7 @@ func Parse(headers []string) (Links, error) { case "init": // noop default: - return nil, fmt.Errorf("unexpected end state %s for header %s: %w", state, h, types.ErrParsingFailed) + return nil, fmt.Errorf("unexpected end state %s for header %s: %w", state, h, errs.ErrParsingFailed) } } @@ -192,5 +192,5 @@ func (links Links) Get(parm, val string) (Link, error) { return link, nil } } - return Link{}, types.ErrNotFound + return Link{}, errs.ErrNotFound } diff --git a/vendor/github.com/regclient/regclient/internal/limitread/limitread.go b/vendor/github.com/regclient/regclient/internal/limitread/limitread.go new file mode 100644 index 000000000..77115ef0c --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/limitread/limitread.go @@ -0,0 +1,29 @@ +// Package limitread provides a reader that will error if the limit is ever exceeded +package limitread + +import ( + "fmt" + "io" + + "github.com/regclient/regclient/types/errs" +) + +type LimitRead struct { + Reader io.Reader + Limit int64 +} + +func (lr *LimitRead) Read(p []byte) (int, error) { + if lr.Limit < 0 { + return 0, fmt.Errorf("read limit exceeded%.0w", errs.ErrSizeLimitExceeded) + } + if int64(len(p)) > lr.Limit+1 { + p = p[0 : lr.Limit+1] + } + n, err := lr.Reader.Read(p) + lr.Limit -= int64(n) + if lr.Limit < 0 { + return n, fmt.Errorf("read limit exceeded%.0w", errs.ErrSizeLimitExceeded) + } + return n, err +} diff --git a/vendor/github.com/regclient/regclient/internal/muset/muset.go b/vendor/github.com/regclient/regclient/internal/muset/muset.go new file mode 100644 index 000000000..9c800ccba --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/muset/muset.go @@ -0,0 +1,49 @@ +// Package muset is used to acquire a group of mutex locks +package muset + +import "sync" + +// Lock acquires a set of locks without holding some locks acquired while waiting for others to be released +func Lock(muList ...*sync.Mutex) { + if len(muList) <= 0 { + return + } + // dedup entries from the list + for i := len(muList) - 2; i >= 0; i-- { + for j := len(muList) - 1; j > i; j-- { + if muList[i] == muList[j] { + // delete j from the list + muList[j] = muList[len(muList)-1] + muList = muList[:len(muList)-1] + } + } + } + lastBlock := 0 + for { + // start from last blocking mutex + muList[lastBlock].Lock() + // acquire all other locks with TryLock + nextBlock := -1 + for i := range muList { + if i == lastBlock { + continue + } + acquired := muList[i].TryLock() + if !acquired { + nextBlock = i + break + } + } + // if all locks acquired, done + if nextBlock == -1 { + return + } + // unlock + for i := range muList { + if i < nextBlock || i == lastBlock { + muList[i].Unlock() + } + } + lastBlock = nextBlock + } +} diff --git a/vendor/github.com/regclient/regclient/internal/reghttp/http.go b/vendor/github.com/regclient/regclient/internal/reghttp/http.go index 6df968678..e40a7b5bd 100644 --- a/vendor/github.com/regclient/regclient/internal/reghttp/http.go +++ b/vendor/github.com/regclient/regclient/internal/reghttp/http.go @@ -26,12 +26,13 @@ import ( _ "crypto/sha512" "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "github.com/regclient/regclient/config" "github.com/regclient/regclient/internal/auth" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/internal/throttle" + "github.com/regclient/regclient/types/errs" "github.com/regclient/regclient/types/warning" - "github.com/sirupsen/logrus" - "golang.org/x/sync/semaphore" ) var defaultDelayInit, _ = 
time.ParseDuration("1s") @@ -45,16 +46,17 @@ const ( // Client is an HTTP client wrapper // It handles features like authentication, retries, backoff delays, TLS settings type Client struct { - host map[string]*clientHost - httpClient *http.Client - rootCAPool [][]byte - rootCADirs []string - retryLimit int - delayInit time.Duration - delayMax time.Duration - log *logrus.Logger - userAgent string - mu sync.Mutex + getConfigHost func(string) *config.Host + host map[string]*clientHost + httpClient *http.Client + rootCAPool [][]byte + rootCADirs []string + retryLimit int + delayInit time.Duration + delayMax time.Duration + log *logrus.Logger + userAgent string + mu sync.Mutex } type clientHost struct { @@ -66,8 +68,7 @@ type clientHost struct { auth map[string]auth.Auth newAuth func() auth.Auth mu sync.Mutex - parallel *semaphore.Weighted - throttle *time.Ticker + ratelimit *time.Ticker } // Req is a request to send to a registry @@ -110,7 +111,7 @@ type clientResp struct { digester digest.Digester reader io.Reader readCur, readMax int64 - parallel *semaphore.Weighted + throttle *throttle.Throttle } // Opts is used to configure client options @@ -152,6 +153,7 @@ func WithCertDirs(dirs []string) Opts { func WithCertFiles(files []string) Opts { return func(c *Client) { for _, f := range files { + //#nosec G304 command is run by a user accessing their own files cert, err := os.ReadFile(f) if err != nil { c.log.WithFields(logrus.Fields{ @@ -165,18 +167,10 @@ func WithCertFiles(files []string) Opts { } } -// WithConfigHosts adds a list of config.Host entries to use for connection settings -func WithConfigHosts(ch []*config.Host) Opts { +// WithConfigHost adds the callback to request a config.Host struct +func WithConfigHost(gch func(string) *config.Host) Opts { return func(c *Client) { - for _, cur := range ch { - if cur.Name == "" { - continue - } - if _, ok := c.host[cur.Name]; !ok { - c.host[cur.Name] = &clientHost{} - } - c.host[cur.Name].config = cur - } + c.getConfigHost = gch } } @@ -272,7 +266,7 @@ func (resp *clientResp) Next() error { if err != nil { return err } - return types.ErrAllRequestsFailed + return errs.ErrAllRequestsFailed } if curHost >= len(hosts) { curHost = 0 @@ -280,33 +274,35 @@ func (resp *clientResp) Next() error { h := hosts[curHost] resp.mirror = h.config.Name + api, okAPI := req.APIs[h.config.API] + if !okAPI { + api, okAPI = req.APIs[""] + } + // check that context isn't canceled/done ctxErr := resp.ctx.Err() if ctxErr != nil { return ctxErr } - - api, okAPI := req.APIs[h.config.API] - if !okAPI { - api, okAPI = req.APIs[""] + // wait for other concurrent requests to this host + throttleErr := h.config.Throttle().Acquire(resp.ctx) + if throttleErr != nil { + return throttleErr } // try each host in a closure to handle all the backoff/dropHost from one place - if h.parallel != nil { - h.parallel.Acquire(resp.ctx, 1) - } - err = func() error { + loopErr := func() error { var err error if !okAPI { dropHost = true - return fmt.Errorf("failed looking up api \"%s\" for host \"%s\": %w", h.config.API, h.config.Name, types.ErrAPINotFound) + return fmt.Errorf("failed looking up api \"%s\" for host \"%s\": %w", h.config.API, h.config.Name, errs.ErrAPINotFound) } if api.Method == "HEAD" && h.config.APIOpts != nil { var disableHead bool disableHead, err = strconv.ParseBool(h.config.APIOpts["disableHead"]) if err == nil && disableHead { dropHost = true - return fmt.Errorf("head requests disabled for host \"%s\": %w", h.config.Name, types.ErrUnsupportedAPI) + return 
fmt.Errorf("head requests disabled for host \"%s\": %w", h.config.Name, errs.ErrUnsupportedAPI) } } @@ -341,18 +337,19 @@ func (resp *clientResp) Next() error { } // close previous response if resp.resp != nil && resp.resp.Body != nil { - resp.resp.Body.Close() + _ = resp.resp.Body.Close() } // delay for backoff if needed - if !h.backoffUntil.IsZero() && h.backoffUntil.After(time.Now()) { - sleepTime := time.Until(h.backoffUntil) + bu := resp.backoffUntil() + if !bu.IsZero() && bu.After(time.Now()) { + sleepTime := time.Until(bu) c.log.WithFields(logrus.Fields{ "Host": h.config.Name, "Seconds": sleepTime.Seconds(), }).Warn("Sleeping for backoff") select { case <-resp.ctx.Done(): - return types.ErrCanceled + return errs.ErrCanceled case <-time.After(sleepTime): } } @@ -400,18 +397,23 @@ func (resp *clientResp) Next() error { if api.Method != "HEAD" && api.Method != "GET" { scope = scope + ",push" } - hAuth.AddScope(h.config.Hostname, scope) + _ = hAuth.AddScope(h.config.Hostname, scope) } // add auth headers err = hAuth.UpdateRequest(httpReq) if err != nil { - backoff = true + if errors.Is(err, errs.ErrHTTPUnauthorized) { + dropHost = true + } else { + backoff = true + } return err } } - if h.throttle != nil { - <-h.throttle.C + // delay for the rate limit + if h.ratelimit != nil { + <-h.ratelimit.C } // update http client for insecure requests and root certs @@ -451,7 +453,7 @@ func (resp *clientResp) Next() error { err = fmt.Errorf("authentication handler unavailable") } if err != nil { - if errors.Is(err, types.ErrEmptyChallenge) || errors.Is(err, types.ErrNoNewChallenge) || errors.Is(err, types.ErrHTTPUnauthorized) { + if errors.Is(err, errs.ErrEmptyChallenge) || errors.Is(err, errs.ErrNoNewChallenge) || errors.Is(err, errs.ErrHTTPUnauthorized) { c.log.WithFields(logrus.Fields{ "URL": u.String(), "Err": err, @@ -462,7 +464,6 @@ func (resp *clientResp) Next() error { "Err": err, }).Warn("Failed to handle auth request") } - backoff = true dropHost = true } else { err = fmt.Errorf("authentication required") @@ -475,7 +476,7 @@ func (resp *clientResp) Next() error { case http.StatusRequestedRangeNotSatisfiable: // if range request error (blob push), drop mirror for this req, but other requests don't need backoff dropHost = true - case http.StatusTooManyRequests, http.StatusRequestTimeout, http.StatusGatewayTimeout: + case http.StatusTooManyRequests, http.StatusRequestTimeout, http.StatusGatewayTimeout, http.StatusInternalServerError: // server is likely overloaded, backoff but still retry backoff = true default: @@ -489,7 +490,7 @@ func (resp *clientResp) Next() error { }).Debug("Request failed") errHTTP := HTTPError(resp.resp.StatusCode) errBody, _ := io.ReadAll(resp.resp.Body) - resp.resp.Body.Close() + _ = resp.resp.Body.Close() return fmt.Errorf("request failed: %w: %s", errHTTP, errBody) } @@ -506,20 +507,20 @@ func (resp *clientResp) Next() error { // verify Content-Range header when range request used, fail if missing if httpReq.Header.Get("Range") != "" && resp.resp.Header.Get("Content-Range") == "" { dropHost = true - resp.resp.Body.Close() + _ = resp.resp.Body.Close() return fmt.Errorf("range request not supported by server") } return nil }() // return on success - if err == nil { - resp.parallel = h.parallel - resp.backoffClear() + if loopErr == nil { + resp.throttle = h.config.Throttle() return nil } // backoff, dropHost, and/or go to next host in the list - if h.parallel != nil { - h.parallel.Release(1) + throttleErr = h.config.Throttle().Release(resp.ctx) + if throttleErr != 
nil { + return throttleErr } if backoff { if api.IgnoreErr { @@ -533,6 +534,11 @@ func (resp *clientResp) Next() error { } } } + // when error does not allow retries, abort with the last known err value + if err != nil && errors.Is(loopErr, errs.ErrNotRetryable) { + return err + } + err = loopErr if dropHost { hosts = append(hosts[:curHost], hosts[curHost+1:]...) } else if !retryHost { @@ -550,13 +556,14 @@ func (resp *clientResp) Read(b []byte) (int, error) { return 0, io.EOF } if resp.resp == nil { - return 0, types.ErrNotFound + return 0, errs.ErrNotFound } // perform the read i, err := resp.reader.Read(b) resp.readCur += int64(i) if err == io.EOF || err == io.ErrUnexpectedEOF { if resp.resp.Request.Method == "HEAD" || resp.readCur >= resp.readMax { + resp.backoffClear() resp.done = true } else { // short read, retry? @@ -565,8 +572,10 @@ func (resp *clientResp) Read(b []byte) (int, error) { "contentLen": resp.readMax, }).Debug("EOF before reading all content, retrying") // retry - resp.backoffSet() - respErr := resp.Next() + respErr := resp.backoffSet() + if respErr == nil { + respErr = resp.Next() + } // unrecoverable EOF if respErr != nil { resp.client.log.WithFields(logrus.Fields{ @@ -584,8 +593,9 @@ func (resp *clientResp) Read(b []byte) (int, error) { "expected": resp.digest, "computed": resp.digester.Digest(), }).Warn("Digest mismatch") + _ = resp.backoffSet() resp.done = true - return i, fmt.Errorf("%w, expected %s, computed %s", types.ErrDigestMismatch, + return i, fmt.Errorf("%w, expected %s, computed %s", errs.ErrDigestMismatch, resp.digest.String(), resp.digester.Digest().String()) } } @@ -597,12 +607,15 @@ func (resp *clientResp) Read(b []byte) (int, error) { } func (resp *clientResp) Close() error { - if resp.parallel != nil { - resp.parallel.Release(1) - resp.parallel = nil + if resp.throttle != nil { + _ = resp.throttle.Release(resp.ctx) + resp.throttle = nil } if resp.resp == nil { - return types.ErrNotFound + return errs.ErrNotFound + } + if !resp.done { + resp.backoffClear() } resp.done = true return resp.resp.Body.Close() @@ -682,12 +695,20 @@ func (resp *clientResp) backoffSet() error { ch.backoffUntil = time.Now().Add(sleepTime) if ch.backoffCur >= c.retryLimit { - return fmt.Errorf("%w: backoffs %d", types.ErrBackoffLimit, ch.backoffCur) + return fmt.Errorf("%w: backoffs %d", errs.ErrBackoffLimit, ch.backoffCur) } return nil } +func (resp *clientResp) backoffUntil() time.Time { + c := resp.client + c.mu.Lock() + defer c.mu.Unlock() + ch := c.host[resp.mirror] + return ch.backoffUntil +} + func (c *Client) getHost(host string) *clientHost { c.mu.Lock() defer c.mu.Unlock() @@ -699,22 +720,31 @@ func (c *Client) getHost(host string) *clientHost { h = &clientHost{} } if h.config == nil { - h.config = config.HostNewName(host) + if c.getConfigHost != nil { + h.config = c.getConfigHost(host) + } else { + h.config = config.HostNewName(host) + } + // check for normalized hostname + if h.config.Name != host { + host = h.config.Name + hNormal, ok := c.host[host] + if ok && hNormal.initialized { + return hNormal + } + } } if h.auth == nil { h.auth = map[string]auth.Auth{} } - if h.throttle == nil && h.config.ReqPerSec > 0 { - h.throttle = time.NewTicker(time.Duration(float64(time.Second) / h.config.ReqPerSec)) - } - if h.parallel == nil && h.config.ReqConcurrent > 0 { - h.parallel = semaphore.NewWeighted(h.config.ReqConcurrent) + if h.ratelimit == nil && h.config.ReqPerSec > 0 { + h.ratelimit = time.NewTicker(time.Duration(float64(time.Second) / h.config.ReqPerSec)) } if 
h.httpClient == nil { h.httpClient = c.httpClient // update http client for insecure requests and root certs - if h.config.TLS == config.TLSInsecure || len(c.rootCAPool) > 0 || len(c.rootCADirs) > 0 || h.config.RegCert != "" { + if h.config.TLS == config.TLSInsecure || len(c.rootCAPool) > 0 || len(c.rootCADirs) > 0 || h.config.RegCert != "" || (h.config.ClientCert != "" && h.config.ClientKey != "") { // create a new client and modify the transport httpClient := *c.httpClient if httpClient.Transport == nil { @@ -726,6 +756,7 @@ func (c *Client) getHost(host string) *clientHost { if t.TLSClientConfig != nil { tlsc = t.TLSClientConfig.Clone() } else { + //#nosec G402 the default TLS 1.2 minimum version is allowed to support older registries tlsc = &tls.Config{} } if h.config.TLS == config.TLSInsecure { @@ -740,6 +771,16 @@ func (c *Client) getHost(host string) *clientHost { tlsc.RootCAs = rootPool } } + if h.config.ClientCert != "" && h.config.ClientKey != "" { + cert, err := tls.X509KeyPair([]byte(h.config.ClientCert), []byte(h.config.ClientKey)) + if err != nil { + c.log.WithFields(logrus.Fields{ + "err": err, + }).Warn("failed to configure client certs") + } else { + tlsc.Certificates = []tls.Certificate{cert} + } + } t.TLSClientConfig = tlsc httpClient.Transport = t } @@ -790,15 +831,15 @@ func (ch *clientHost) AuthCreds() func(h string) auth.Cred { func HTTPError(statusCode int) error { switch statusCode { case 401: - return fmt.Errorf("%w [http %d]", types.ErrHTTPUnauthorized, statusCode) + return fmt.Errorf("%w [http %d]", errs.ErrHTTPUnauthorized, statusCode) case 403: - return fmt.Errorf("%w [http %d]", types.ErrHTTPUnauthorized, statusCode) + return fmt.Errorf("%w [http %d]", errs.ErrHTTPUnauthorized, statusCode) case 404: - return fmt.Errorf("%w [http %d]", types.ErrNotFound, statusCode) + return fmt.Errorf("%w [http %d]", errs.ErrNotFound, statusCode) case 429: - return fmt.Errorf("%w [http %d]", types.ErrHTTPRateLimit, statusCode) + return fmt.Errorf("%w [http %d]", errs.ErrHTTPRateLimit, statusCode) default: - return fmt.Errorf("%w: %s [http %d]", types.ErrHTTPStatus, http.StatusText(statusCode), statusCode) + return fmt.Errorf("%w: %s [http %d]", errs.ErrHTTPStatus, http.StatusText(statusCode), statusCode) } } @@ -817,7 +858,7 @@ func makeRootPool(rootCAPool [][]byte, rootCADirs []string, hostname string, hos files, err := os.ReadDir(hostDir) if err != nil { if !os.IsNotExist(err) { - return nil, fmt.Errorf("failed to read directory %s: %v", hostDir, err) + return nil, fmt.Errorf("failed to read directory %s: %w", hostDir, err) } continue } @@ -827,9 +868,10 @@ func makeRootPool(rootCAPool [][]byte, rootCADirs []string, hostname string, hos } if strings.HasSuffix(f.Name(), ".crt") { f := filepath.Join(hostDir, f.Name()) + //#nosec G304 file from a known directory and extension read by the user running the command on their own host cert, err := os.ReadFile(f) if err != nil { - return nil, fmt.Errorf("failed to read %s: %v", f, err) + return nil, fmt.Errorf("failed to read %s: %w", f, err) } if ok := pool.AppendCertsFromPEM(cert); !ok { return nil, fmt.Errorf("failed to import cert from %s", f) diff --git a/vendor/github.com/regclient/regclient/internal/rwfs/mem.go b/vendor/github.com/regclient/regclient/internal/rwfs/mem.go index 8dc25b9a8..f553d965a 100644 --- a/vendor/github.com/regclient/regclient/internal/rwfs/mem.go +++ b/vendor/github.com/regclient/regclient/internal/rwfs/mem.go @@ -7,7 +7,10 @@ import ( "path" "sort" "strings" + "sync" "time" + + 
"github.com/regclient/regclient/internal/muset" ) // in memory implementation of rwfs @@ -22,10 +25,12 @@ type MemChild interface{} type MemDir struct { child map[string]MemChild mod time.Time + mu sync.Mutex } type MemFile struct { b []byte mod time.Time + mu sync.Mutex } type MemDirFP struct { f *MemDir @@ -68,6 +73,8 @@ func (o *MemFS) Mkdir(name string, perm fs.FileMode) error { Err: err, } } + memDir.mu.Lock() + defer memDir.mu.Unlock() if _, ok := memDir.child[base]; ok { return &fs.PathError{ Op: "mkdir", @@ -90,6 +97,8 @@ func (o *MemFS) OpenFile(name string, flags int, perm fs.FileMode) (RWFile, erro Err: err, } } + memDir.mu.Lock() + defer memDir.mu.Unlock() var child MemChild if file == "." || file == "" { child = memDir @@ -168,6 +177,8 @@ func (o *MemFS) Remove(name string) error { Err: err, } } + memDir.mu.Lock() + defer memDir.mu.Unlock() if child, ok := memDir.child[file]; ok { switch v := child.(type) { case *MemFile: @@ -221,6 +232,16 @@ func (o *MemFS) Rename(oldName, newName string) error { Err: err, } } + lockList := []*sync.Mutex{&(memDirOld.mu)} + if dirOld != dirNew { + lockList = append(lockList, &(memDirNew.mu)) + } + muset.Lock(lockList...) + defer func() { + for _, l := range lockList { + l.Unlock() + } + }() childOld, okOld := memDirOld.child[fileOld] if !okOld { return &fs.PathError{ @@ -299,6 +320,44 @@ func (o *MemFS) Rename(oldName, newName string) error { return nil } +func (o *MemFS) Stat(name string) (fs.FileInfo, error) { + dir, entry := path.Split(name) + memDir, err := o.getDir(dir) + if err != nil { + return nil, &fs.PathError{ + Op: "stat", + Path: name, + Err: err, + } + } + memDir.mu.Lock() + defer memDir.mu.Unlock() + memEntry, ok := memDir.child[entry] + if !ok { + return nil, &fs.PathError{ + Op: "stat", + Path: name, + Err: fs.ErrNotExist, + } + } + switch vEntry := memEntry.(type) { + case *MemFile: + vEntry.mu.Lock() + defer vEntry.mu.Unlock() + return NewFI(entry, int64(len(vEntry.b)), time.Time{}, 0), nil + case *MemDir: + vEntry.mu.Lock() + defer vEntry.mu.Unlock() + return NewFI(entry, 4096, vEntry.mod, fs.ModeDir), nil + default: + return nil, &fs.PathError{ + Op: "stat", + Path: name, + Err: fs.ErrInvalid, + } + } +} + func (o *MemFS) Sub(name string) (*MemFS, error) { if name == "." { return o, nil @@ -338,7 +397,9 @@ func (o *MemFS) getDir(dir string) (*MemDir, error) { if el == "." 
|| el == "" { continue } + cur.mu.Lock() next, ok := cur.child[el] + cur.mu.Unlock() if !ok { return nil, fs.ErrNotExist } @@ -357,6 +418,8 @@ func (mfp *MemFileFP) Close() error { } func (mfp *MemFileFP) Read(b []byte) (int, error) { + mfp.f.mu.Lock() + defer mfp.f.mu.Unlock() lc := copy(b, mfp.f.b[mfp.cur:]) mfp.cur += lc if len(mfp.f.b) <= mfp.cur { @@ -366,6 +429,8 @@ func (mfp *MemFileFP) Read(b []byte) (int, error) { } func (mfp *MemFileFP) Seek(offset int64, whence int) (int64, error) { + mfp.f.mu.Lock() + defer mfp.f.mu.Unlock() switch whence { case io.SeekStart: mfp.cur = whence @@ -380,6 +445,8 @@ func (mfp *MemFileFP) Seek(offset int64, whence int) (int64, error) { } func (mfp *MemFileFP) Stat() (fs.FileInfo, error) { + mfp.f.mu.Lock() + defer mfp.f.mu.Unlock() fi := NewFI(mfp.name, int64(len(mfp.f.b)), time.Time{}, 0) return fi, nil } @@ -388,6 +455,8 @@ func (mfp *MemFileFP) Write(b []byte) (n int, err error) { if len(b) == 0 { return 0, nil } + mfp.f.mu.Lock() + defer mfp.f.mu.Unlock() // use copy to overwrite existing contents if mfp.cur < len(mfp.f.b) { l := copy(mfp.f.b[mfp.cur:], b) @@ -418,6 +487,8 @@ func (mdp *MemDirFP) Read(b []byte) (int, error) { // TODO: implement func (mdp *MemDirFP) Seek func (mdp *MemDirFP) ReadDir(n int) ([]fs.DirEntry, error) { + mdp.f.mu.Lock() + defer mdp.f.mu.Unlock() names := mdp.filenames(mdp.cur, n) mdp.cur += len(names) des := make([]fs.DirEntry, len(names)) @@ -453,6 +524,8 @@ func (mdp *MemDirFP) ReadDir(n int) ([]fs.DirEntry, error) { } func (mdp *MemDirFP) Stat() (fs.FileInfo, error) { + mdp.f.mu.Lock() + defer mdp.f.mu.Unlock() fi := NewFI(mdp.name, 4096, mdp.f.mod, fs.ModeDir) return fi, nil } diff --git a/vendor/github.com/regclient/regclient/internal/rwfs/os.go b/vendor/github.com/regclient/regclient/internal/rwfs/os.go index a9876f348..e5955bcde 100644 --- a/vendor/github.com/regclient/regclient/internal/rwfs/os.go +++ b/vendor/github.com/regclient/regclient/internal/rwfs/os.go @@ -49,6 +49,7 @@ func (o *OSFS) Create(name string) (WFile, error) { if err != nil { return nil, err } + // #nosec G304 rwfs/os.go is a pass through to the filesystem fh, err := os.Create(file) if err != nil { return nil, err @@ -88,6 +89,7 @@ func (o *OSFS) OpenFile(name string, flag int, perm fs.FileMode) (RWFile, error) if err != nil { return nil, err } + // #nosec G304 rwfs/os.go is a pass through to the filesystem fh, err := os.OpenFile(file, flag, perm) if err != nil { return nil, err @@ -102,6 +104,7 @@ func (o *OSFS) Open(name string) (fs.File, error) { if err != nil { return nil, err } + // #nosec G304 rwfs/os.go is a pass through to the filesystem fh, err := os.Open(file) if err != nil { return nil, err @@ -139,6 +142,16 @@ func (o *OSFS) Rename(oldName, newName string) error { return os.Rename(oldFile, newFile) } +// Stat returns a FileInfo describing the named file. +func (o *OSFS) Stat(name string) (fs.FileInfo, error) { + full, err := o.join("stat", name) + if err != nil { + return nil, err + } + return os.Stat(full) +} + +// Sub returns an FS corresponding to the subtree rooted at dir. func (o *OSFS) Sub(name string) (*OSFS, error) { if name == "." 
{ return o, nil diff --git a/vendor/github.com/regclient/regclient/internal/rwfs/rwfs.go b/vendor/github.com/regclient/regclient/internal/rwfs/rwfs.go index 918b04bd1..b1f78c940 100644 --- a/vendor/github.com/regclient/regclient/internal/rwfs/rwfs.go +++ b/vendor/github.com/regclient/regclient/internal/rwfs/rwfs.go @@ -3,6 +3,7 @@ package rwfs import ( "errors" + "fmt" "io" "io/fs" "math/rand" @@ -88,10 +89,13 @@ func CopyRecursive(srcFS fs.FS, srcName string, destFS RWFS, destName string) er return err } sfi, err := sfh.Stat() - sfh.Close() + errC := sfh.Close() if err != nil { return err } + if errC != nil { + return fmt.Errorf("failed to close source file handle: %w", errC) + } if !sfi.IsDir() { return Copy(srcFS, srcName, destFS, destName) } @@ -131,9 +135,10 @@ func CreateTemp(rwfs RWFS, dir, pattern string) (RWFile, error) { if i >= 0 { prefix, suffix = pattern[:i], pattern[i+1:] } - prefix = filepath.Join(dir, prefix) + prefix = filepath.Clean(dir) + string(filepath.Separator) + prefix try := 0 for { + //#nosec G404 locking and for loop used to ensure rnd value is unique rnd := strconv.FormatUint(rand.Uint64(), 10) name := prefix + rnd + suffix f, err := rwfs.OpenFile(name, O_RDWR|O_CREATE|O_EXCL, 0600) @@ -150,44 +155,40 @@ func CreateTemp(rwfs RWFS, dir, pattern string) (RWFile, error) { // MkdirAll creates a directory, including all parent directories func MkdirAll(rwfs RWFS, name string, perm fs.FileMode) error { - parts := strings.Split(name, "/") - prefix := "" - if strings.HasPrefix(name, "/") { - prefix = "/" - } - for i := range parts { - // assemble directory up to this point - cur := prefix + path.Join(parts[:i+1]...) - if cur == "" || cur == "." || cur == "/" { - continue + fi, err := Stat(rwfs, name) + if err == nil { + if fi.IsDir() { + return nil } - fi, err := Stat(rwfs, cur) - if errors.Is(err, fs.ErrNotExist) { - // missing, create - err := rwfs.Mkdir(cur, perm) - if err != nil { - return &fs.PathError{ - Op: "mkdir", - Path: cur, - Err: err, - } - } - } else if err != nil { - // unknown errors - return &fs.PathError{ - Op: "mkdir", - Path: cur, - Err: err, - } - } else if !fi.IsDir() { - // can't mkdir on existing file - return &fs.PathError{ - Op: "mkdir", - Path: cur, - Err: fs.ErrExist, + return &fs.PathError{ + Op: "mkdir", + Path: name, + Err: fs.ErrExist, + } + } + // create parent + i := len(name) + for i > 0 && name[i-1] == '/' { // remove trailing slash + i-- + } + for i > 0 && name[i-1] != '/' { // remove up to next slash + i-- + } + if i > 1 { + err = MkdirAll(rwfs, name[:i-1], perm) + if err != nil { + return err + } + } + // create directory + if name != "" && name != "." 
&& name != "/" { + err = rwfs.Mkdir(name, perm) + if err != nil { + if fi, err := Stat(rwfs, name); err == nil && fi.IsDir() { + return nil } + return err } - // exists and is a directory, next } return nil } @@ -196,6 +197,12 @@ func MkdirAll(rwfs RWFS, name string, perm fs.FileMode) error { // Stat returns the FileInfo for a specified file func Stat(rfs fs.FS, name string) (fs.FileInfo, error) { + sInt, ok := rfs.(interface { + Stat(name string) (fs.FileInfo, error) + }) + if ok { + return sInt.Stat(name) + } fh, err := rfs.Open(name) if err != nil { return nil, err diff --git a/vendor/github.com/regclient/regclient/internal/strparse/strparse.go b/vendor/github.com/regclient/regclient/internal/strparse/strparse.go new file mode 100644 index 000000000..f67fdafe9 --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/strparse/strparse.go @@ -0,0 +1,91 @@ +// Package strparse is used to parse strings +package strparse + +import ( + "fmt" + + "github.com/regclient/regclient/types/errs" +) + +// SplitCSKV splits a comma separated key=value list into a map +func SplitCSKV(s string) (map[string]string, error) { + state := "key" + key := "" + val := "" + result := map[string]string{} + procKV := func() { + if key != "" { + result[key] = val + } + state = "key" + key = "" + val = "" + } + for _, c := range s { + switch state { + case "key": + switch c { + case '"': + state = "keyQuote" + case '\\': + state = "keyEscape" + case '=': + state = "val" + case ',': + procKV() + default: + key = key + string(c) + } + case "keyQuote": + switch c { + case '"': + state = "key" + case '\\': + state = "keyEscapeQuote" + default: + key = key + string(c) + } + case "keyEscape": + key = key + string(c) + state = "key" + case "keyEscapeQuote": + key = key + string(c) + state = "keyQuote" + case "val": + switch c { + case '"': + state = "valQuote" + case ',': + procKV() + case '\\': + state = "valEscape" + default: + val = val + string(c) + } + case "valQuote": + switch c { + case '"': + state = "val" + case '\\': + state = "valEscapeQuote" + default: + val = val + string(c) + } + case "valEscape": + val = val + string(c) + state = "val" + case "valEscapeQuote": + val = val + string(c) + state = "valQuote" + default: + return nil, fmt.Errorf("unhandled state: %s", state) + } + } + switch state { + case "val", "key": + procKV() + default: + return nil, fmt.Errorf("string parsing failed, end state: %s%.0w", state, errs.ErrParsingFailed) + } + return result, nil +} diff --git a/vendor/github.com/regclient/regclient/internal/throttle/throttle.go b/vendor/github.com/regclient/regclient/internal/throttle/throttle.go new file mode 100644 index 000000000..9c6b0f7c8 --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/throttle/throttle.go @@ -0,0 +1,192 @@ +// Package throttle is used to limit concurrent activities +package throttle + +import ( + "context" + "fmt" +) + +type token struct{} +type Throttle struct { + ch chan token +} +type key int +type valMany struct { + tList []*Throttle +} + +var keyMany key + +func New(count int) *Throttle { + ch := make(chan token, count) + return &Throttle{ch: ch} +} + +func (t *Throttle) checkContext(ctx context.Context) (bool, error) { + tCtx := ctx.Value(keyMany) + if tCtx == nil { + return false, nil + } + tCtxVal, ok := tCtx.(*valMany) + if !ok { + return true, fmt.Errorf("context value is not a throttle list") + } + if tCtxVal.tList == nil { + return false, nil + } + for _, cur := range tCtxVal.tList { + if cur == t { + // instance already locked + 
return true, nil + } + } + return true, fmt.Errorf("cannot acquire new locks during a transaction") +} + +func (t *Throttle) Acquire(ctx context.Context) error { + if t == nil { + return nil + } + // check if already acquired in context + if found, err := t.checkContext(ctx); found { + return err + } + select { + case <-ctx.Done(): + return ctx.Err() + case t.ch <- token{}: + return nil + } +} + +func (t *Throttle) Release(ctx context.Context) error { + if t == nil { + return nil + } + // check if already acquired in context + if found, err := t.checkContext(ctx); found { + return err + } + select { + case <-t.ch: + return nil + default: + return fmt.Errorf("failed to release throttle") + } +} + +func (t *Throttle) TryAcquire(ctx context.Context) (bool, error) { + if t == nil { + return true, nil + } + // check if already acquired in context + if found, err := t.checkContext(ctx); found { + return err == nil, err + } + select { + case t.ch <- token{}: + return true, nil + default: + return false, nil + } +} + +func AcquireMulti(ctx context.Context, tList []*Throttle) (context.Context, error) { + // verify context not already holding locks + tCtx := ctx.Value(keyMany) + if tCtx != nil { + if tCtxVal, ok := tCtx.(*valMany); ok && tCtxVal.tList != nil { + return ctx, fmt.Errorf("throttle cannot manage concurrent transactions") + } + } + if len(tList) <= 0 { + // noop? + return ctx, nil + } + // dedup entries from the list + for i := len(tList) - 2; i >= 0; i-- { + for j := len(tList) - 1; j > i; j-- { + if tList[i] == tList[j] { + // delete j from the list + tList[j] = tList[len(tList)-1] + tList = tList[:len(tList)-1] + } + } + } + lockI := 0 + for { + err := tList[lockI].Acquire(ctx) + if err != nil { + return ctx, err + } + acquired := true + i := 0 + for i < len(tList) { + if i != lockI { + acquired, err = tList[i].TryAcquire(ctx) + if err != nil || !acquired { + break + } + } + i++ + } + if err == nil && acquired { + break + } + // TODO: errors on Release should be included using errors.Join once 1.20 is the minimum version + // cleanup on failed attempt + if lockI > i { + _ = tList[lockI].Release(ctx) + } + // track blocking index + lockI = i + for i > 0 { + i-- + _ = tList[i].Release(ctx) + } + // abort on errors + if err != nil { + return ctx, err + } + } + // success, update context + newCtx := context.WithValue(ctx, keyMany, &valMany{tList: tList}) + return newCtx, nil +} + +func ReleaseMulti(ctx context.Context, tList []*Throttle) error { + // verify context shows locked values + tCtx := ctx.Value(keyMany) + if tCtx == nil { + return fmt.Errorf("no transaction found to release") + } + tCtxVal, ok := tCtx.(*valMany) + if !ok || tCtxVal.tList == nil { + return fmt.Errorf("no transaction found to release") + } + // dedup entries from the list + for i := len(tList) - 2; i >= 0; i-- { + for j := len(tList) - 1; j > i; j-- { + if tList[i] == tList[j] { + // delete j from the list + tList[j] = tList[len(tList)-1] + tList = tList[:len(tList)-1] + } + } + } + // TODO: release from tList, tCtx, or compare and error if diff? 
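+	// drain each throttle's channel directly; calling t.Release here would
+	// no-op because this context still carries the transaction value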
+ for _, t := range tList { + if t == nil { + continue + } + // cannot call t.Release since context has value defined + select { + case <-t.ch: + default: + return fmt.Errorf("failed to release throttle") + } + } + // modify context value to track release + tCtxVal.tList = nil + return nil +} diff --git a/vendor/github.com/regclient/regclient/internal/units/size.go b/vendor/github.com/regclient/regclient/internal/units/size.go index 7ea0fd49e..772e48737 100644 --- a/vendor/github.com/regclient/regclient/internal/units/size.go +++ b/vendor/github.com/regclient/regclient/internal/units/size.go @@ -1,4 +1,4 @@ -// Package units is copied from https://github.com/docker/go-units +// Package units is taken from https://github.com/docker/go-units package units // Copyright 2015 Docker, Inc. @@ -17,107 +17,41 @@ package units import ( "fmt" - "regexp" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[string]int64 - -var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`) ) var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} -func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { +func getSizeAndUnit(size float64, base float64, unitList []string) (float64, string) { i := 0 - unitsLimit := len(_map) - 1 + unitsLimit := len(unitList) - 1 for size >= base && i < unitsLimit { size = size / base i++ } - return size, _map[i] + return size, unitList[i] } -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - size, unit := getSizeAndUnit(size, base, _map) +// CustomSize returns a human-readable approximation of a size using custom format. +func CustomSize(format string, size float64, base float64, unitList []string) string { + size, unit := getSizeAndUnit(size, base, unitList) return fmt.Sprintf(format, size, unit) } -// HumanSizeWithPrecision allows the size to be in any precision, -// instead of 4 digit precision used in units.HumanSize. -func HumanSizeWithPrecision(size float64, precision int) string { +// HumanSizeWithPrecision allows the size to be in any precision. +func HumanSizeWithPrecision(size float64, width, precision int) string { size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) - return fmt.Sprintf("%.*g%s", precision, size, unit) + return fmt.Sprintf("%*.*f%s", width, precision, size, unit) } // HumanSize returns a human-readable approximation of a size -// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). +// with a width of 5 (eg. "2.746MB", "796.0KB"). func HumanSize(size float64) string { - return HumanSizeWithPrecision(size, 4) + return HumanSizeWithPrecision(size, 5, 3) } // BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). +// mebibytes, gibibytes, or tebibytes (eg. "44.2kiB", "17.6MiB"). 
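+//
+// For example, with the "%5.3f%s" format used below:
+//
+//	BytesSize(45056)    // "44.000KiB"
+//	BytesSize(17825792) // "17.000MiB"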
func BytesSize(size float64) string { - return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. -func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. -func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 4 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - size, err := strconv.ParseFloat(matches[1], 64) - if err != nil { - return -1, err - } - - unitPrefix := strings.ToLower(matches[3]) - if mul, ok := uMap[unitPrefix]; ok { - size *= float64(mul) - } - - return int64(size), nil + return CustomSize("%5.3f%s", size, 1024.0, binaryAbbrs) } diff --git a/vendor/github.com/regclient/regclient/internal/version/version.go b/vendor/github.com/regclient/regclient/internal/version/version.go index 9e260eca5..5e5715e58 100644 --- a/vendor/github.com/regclient/regclient/internal/version/version.go +++ b/vendor/github.com/regclient/regclient/internal/version/version.go @@ -27,6 +27,6 @@ func (i Info) MarshalPretty() ([]byte, error) { fmt.Fprintf(tw, "Platform:\t%s\n", i.Platform) fmt.Fprintf(tw, "GoVer:\t%s\n", i.GoVer) fmt.Fprintf(tw, "GoCompiler:\t%s\n", i.GoCompiler) - tw.Flush() - return buf.Bytes(), nil + err := tw.Flush() + return buf.Bytes(), err } diff --git a/vendor/github.com/regclient/regclient/internal/wraperr/error.go b/vendor/github.com/regclient/regclient/internal/wraperr/error.go deleted file mode 100644 index ca583a8e8..000000000 --- a/vendor/github.com/regclient/regclient/internal/wraperr/error.go +++ /dev/null @@ -1,34 +0,0 @@ -// Package wraperr packages an error with another wrapped error -// This allows errors.Is to work without directly injecting the string of the wrapped error -package wraperr - -// WrapErr wraps an underlying error with another error -// -// Example usage: -// -// var ErrLimit = errors.New("Limit reached") -// ... -// func someFunc() error { -// return wrapper.WrapErr{Err: fmt.Errorf("Limit %d reached on %s", limit, instance), Wrap: ErrLimit} -// } -// ... -// if errors.Is(err, ErrLimit) { ... 
} -// -// This makes it easier to include a custom error message while also allowing -type WrapErr struct { - Err error - Wrap error -} - -// New returns a new WrapErr -func New(err, wrap error) WrapErr { - return WrapErr{Err: err, Wrap: wrap} -} - -func (e WrapErr) Error() string { - return e.Err.Error() -} - -func (e WrapErr) Unwrap() error { - return e.Wrap -} diff --git a/vendor/github.com/regclient/regclient/manifest.go b/vendor/github.com/regclient/regclient/manifest.go index 65daab457..2aa767ef6 100644 --- a/vendor/github.com/regclient/regclient/manifest.go +++ b/vendor/github.com/regclient/regclient/manifest.go @@ -2,20 +2,22 @@ package regclient import ( "context" + "fmt" "github.com/regclient/regclient/scheme" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" "github.com/regclient/regclient/types/manifest" "github.com/regclient/regclient/types/ref" ) type manifestOpt struct { - d types.Descriptor + d descriptor.Descriptor schemeOpts []scheme.ManifestOpts requireDigest bool } -// ManifestOpts define options for the Manifest* commands +// ManifestOpts define options for the Manifest* commands. type ManifestOpts func(*manifestOpt) // WithManifest passes a manifest to ManifestDelete. @@ -26,13 +28,15 @@ func WithManifest(m manifest.Manifest) ManifestOpts { } // WithManifestCheckReferrers checks for referrers field on ManifestDelete. +// This will update the client managed referrer listing. func WithManifestCheckReferrers() ManifestOpts { return func(opts *manifestOpt) { opts.schemeOpts = append(opts.schemeOpts, scheme.WithManifestCheckReferrers()) } } -// WithManifestChild for ManifestPut. +// WithManifestChild for ManifestPut indicates the manifest is not the top level manifest being copied. +// This is used by the ocidir scheme to determine what entries to include in the index.json. func WithManifestChild() ManifestOpts { return func(opts *manifestOpt) { opts.schemeOpts = append(opts.schemeOpts, scheme.WithManifestChild()) @@ -41,7 +45,7 @@ func WithManifestChild() ManifestOpts { // WithManifestDesc includes the descriptor for ManifestGet. // This is used to automatically extract a Data field if available. -func WithManifestDesc(d types.Descriptor) ManifestOpts { +func WithManifestDesc(d descriptor.Descriptor) ManifestOpts { return func(opts *manifestOpt) { opts.d = d } @@ -54,10 +58,13 @@ func WithManifestRequireDigest() ManifestOpts { } } -// ManifestDelete removes a manifest, including all tags pointing to that registry -// The reference must include the digest to delete (see TagDelete for deleting a tag) -// All tags pointing to the manifest will be deleted +// ManifestDelete removes a manifest, including all tags pointing to that registry. +// The reference must include the digest to delete (see TagDelete for deleting a tag). +// All tags pointing to the manifest will be deleted. func (rc *RegClient) ManifestDelete(ctx context.Context, r ref.Ref, opts ...ManifestOpts) error { + if !r.IsSet() { + return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } opt := manifestOpt{schemeOpts: []scheme.ManifestOpts{}} for _, fn := range opts { fn(&opt) @@ -69,8 +76,11 @@ func (rc *RegClient) ManifestDelete(ctx context.Context, r ref.Ref, opts ...Mani return schemeAPI.ManifestDelete(ctx, r, opt.schemeOpts...) } -// ManifestGet retrieves a manifest +// ManifestGet retrieves a manifest. 
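The wraperr removal works because fmt.Errorf with the %w verb covers the same ground; the %.0w form used throughout these files wraps a sentinel for errors.Is while contributing zero characters to the message (a precision of 0 truncates the printed text). A minimal sketch, with an illustrative sentinel standing in for the errs package:

package main

import (
	"errors"
	"fmt"
)

var errLimit = errors.New("limit reached")

func main() {
	// %.0w wraps errLimit for errors.Is but prints none of its text
	err := fmt.Errorf("limit 5 reached on instance-a%.0w", errLimit)
	fmt.Println(err)                      // limit 5 reached on instance-a
	fmt.Println(errors.Is(err, errLimit)) // true
}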
func (rc *RegClient) ManifestGet(ctx context.Context, r ref.Ref, opts ...ManifestOpts) (manifest.Manifest, error) { + if !r.IsSet() { + return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } opt := manifestOpt{schemeOpts: []scheme.ManifestOpts{}} for _, fn := range opts { fn(&opt) @@ -93,8 +103,11 @@ func (rc *RegClient) ManifestGet(ctx context.Context, r ref.Ref, opts ...Manifes return schemeAPI.ManifestGet(ctx, r) } -// ManifestHead queries for the existence of a manifest and returns metadata (digest, media-type, size) +// ManifestHead queries for the existence of a manifest and returns metadata (digest, media-type, size). func (rc *RegClient) ManifestHead(ctx context.Context, r ref.Ref, opts ...ManifestOpts) (manifest.Manifest, error) { + if !r.IsSet() { + return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } opt := manifestOpt{schemeOpts: []scheme.ManifestOpts{}} for _, fn := range opts { fn(&opt) @@ -113,9 +126,12 @@ func (rc *RegClient) ManifestHead(ctx context.Context, r ref.Ref, opts ...Manife return m, err } -// ManifestPut pushes a manifest -// Any descriptors referenced by the manifest typically need to be pushed first +// ManifestPut pushes a manifest. +// Any descriptors referenced by the manifest typically need to be pushed first. func (rc *RegClient) ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, opts ...ManifestOpts) error { + if !r.IsSetRepo() { + return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } opt := manifestOpt{schemeOpts: []scheme.ManifestOpts{}} for _, fn := range opts { fn(&opt) diff --git a/vendor/github.com/regclient/regclient/ping.go b/vendor/github.com/regclient/regclient/ping.go new file mode 100644 index 000000000..4298f7ac3 --- /dev/null +++ b/vendor/github.com/regclient/regclient/ping.go @@ -0,0 +1,18 @@ +package regclient + +import ( + "context" + + "github.com/regclient/regclient/types/ping" + "github.com/regclient/regclient/types/ref" +) + +// Ping verifies access to a registry or equivalent. 
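The new r.IsSet guards on the Manifest* entry points reject zero-value or partially parsed references before any scheme is selected, so callers see errs.ErrInvalidReference rather than a confusing scheme-level failure. A usage sketch against this API; the registry and repository names are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/regclient/regclient"
	"github.com/regclient/regclient/types/ref"
)

func main() {
	ctx := context.Background()
	rc := regclient.New()
	r, err := ref.New("registry.example.com/repo:v1") // placeholder reference
	if err != nil {
		fmt.Println("parse:", err)
		return
	}
	// head is the cheap probe: digest, media type, and size, without the body
	m, err := rc.ManifestHead(ctx, r)
	if err != nil {
		fmt.Println("head:", err)
		return
	}
	fmt.Println(m.GetDescriptor().Digest)
}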
+func (rc *RegClient) Ping(ctx context.Context, r ref.Ref) (ping.Result, error) { + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return ping.Result{}, err + } + + return schemeAPI.Ping(ctx, r) +} diff --git a/vendor/github.com/regclient/regclient/pkg/archive/compress.go b/vendor/github.com/regclient/regclient/pkg/archive/compress.go index 74dcaef49..0e3ab76f6 100644 --- a/vendor/github.com/regclient/regclient/pkg/archive/compress.go +++ b/vendor/github.com/regclient/regclient/pkg/archive/compress.go @@ -6,6 +6,8 @@ import ( "compress/bzip2" "compress/gzip" "io" + + "github.com/ulikunitz/xz" ) // CompressType identifies the detected compression type @@ -46,6 +48,9 @@ func Compress(r io.Reader, oComp CompressType) (io.Reader, error) { return compressGzip(br) case CompressBzip2: return compressGzip(bzip2.NewReader(br)) + case CompressXz: + cbr, _ := xz.NewReader(br) + return compressGzip(cbr) } } // No other types currently supported @@ -79,7 +84,7 @@ func Decompress(r io.Reader) (io.Reader, error) { case CompressGzip: return gzip.NewReader(br) case CompressXz: - return br, ErrXzUnsupported + return xz.NewReader(br) default: return br, nil } diff --git a/vendor/github.com/regclient/regclient/pkg/archive/tar.go b/vendor/github.com/regclient/regclient/pkg/archive/tar.go index 375fe6fcd..705e9bac4 100644 --- a/vendor/github.com/regclient/regclient/pkg/archive/tar.go +++ b/vendor/github.com/regclient/regclient/pkg/archive/tar.go @@ -50,7 +50,7 @@ func Tar(ctx context.Context, path string, w io.Writer, opts ...TarOpts) error { defer tw.Close() // walk the path performing a recursive tar - filepath.Walk(path, func(file string, fi os.FileInfo, err error) error { + err := filepath.Walk(path, func(file string, fi os.FileInfo, err error) error { // return any errors filepath encounters accessing the file if err != nil { return err @@ -83,6 +83,7 @@ func Tar(ctx context.Context, path string, w io.Writer, opts ...TarOpts) error { // open file and copy contents into tar writer if header.Typeflag == tar.TypeReg && header.Size > 0 { + //#nosec G304 filename is limited to provided path directory f, err := os.Open(file) if err != nil { return err @@ -90,12 +91,14 @@ func Tar(ctx context.Context, path string, w io.Writer, opts ...TarOpts) error { if _, err = io.Copy(tw, f); err != nil { return err } - f.Close() + err = f.Close() + if err != nil { + return fmt.Errorf("failed to close file: %w", err) + } } - return nil }) - return nil + return err } // Extract Tar @@ -139,15 +142,19 @@ func Extract(ctx context.Context, path string, r io.Reader, opts ...TarOpts) err } case tar.TypeReg: // TODO: configure file mode, creation timestamp, etc + //#nosec G304 filename is limited to provided path directory fh, err := os.Create(fn) if err != nil { return err } - n, err := io.Copy(fh, rt) - fh.Close() + n, err := io.CopyN(fh, rt, hdr.Size) + errC := fh.Close() if err != nil { return err } + if errC != nil { + return fmt.Errorf("failed to close file: %w", errC) + } if n != hdr.Size { return fmt.Errorf("size mismatch extracting \"%s\", expected %d, extracted %d", hdr.Name, hdr.Size, n) } diff --git a/vendor/github.com/regclient/regclient/referrer.go b/vendor/github.com/regclient/regclient/referrer.go index 3cacb326b..a4411bbe2 100644 --- a/vendor/github.com/regclient/regclient/referrer.go +++ b/vendor/github.com/regclient/regclient/referrer.go @@ -2,14 +2,20 @@ package regclient import ( "context" + "fmt" "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/errs" 
"github.com/regclient/regclient/types/ref" "github.com/regclient/regclient/types/referrer" ) -// ReferrerList retrieves a manifest +// ReferrerList retrieves a list of referrers to a manifest. +// The descriptor list should contain manifests that each have a subject field matching the requested ref. func (rc *RegClient) ReferrerList(ctx context.Context, r ref.Ref, opts ...scheme.ReferrerOpts) (referrer.ReferrerList, error) { + if !r.IsSet() { + return referrer.ReferrerList{}, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } schemeAPI, err := rc.schemeGet(r.Scheme) if err != nil { return referrer.ReferrerList{}, err diff --git a/vendor/github.com/regclient/regclient/regclient.go b/vendor/github.com/regclient/regclient/regclient.go index 9fba67ff9..660bbdd46 100644 --- a/vendor/github.com/regclient/regclient/regclient.go +++ b/vendor/github.com/regclient/regclient/regclient.go @@ -1,4 +1,4 @@ -// Package regclient is used to access OCI registries +// Package regclient is used to access OCI registries. package regclient import ( @@ -7,29 +7,30 @@ import ( "fmt" + "github.com/sirupsen/logrus" + "github.com/regclient/regclient/config" "github.com/regclient/regclient/internal/rwfs" "github.com/regclient/regclient/internal/version" "github.com/regclient/regclient/scheme" "github.com/regclient/regclient/scheme/ocidir" "github.com/regclient/regclient/scheme/reg" - "github.com/sirupsen/logrus" ) const ( - // DefaultUserAgent sets the header on http requests + // DefaultUserAgent sets the header on http requests. DefaultUserAgent = "regclient/regclient" - // DockerCertDir default location for docker certs + // DockerCertDir default location for docker certs. DockerCertDir = "/etc/docker/certs.d" - // DockerRegistry is the well known name of Docker Hub, "docker.io" + // DockerRegistry is the well known name of Docker Hub, "docker.io". DockerRegistry = config.DockerRegistry - // DockerRegistryAuth is the name of Docker Hub seen in docker's config.json + // DockerRegistryAuth is the name of Docker Hub seen in docker's config.json. DockerRegistryAuth = config.DockerRegistryAuth - // DockerRegistryDNS is the actual registry DNS name for Docker Hub + // DockerRegistryDNS is the actual registry DNS name for Docker Hub. DockerRegistryDNS = config.DockerRegistryDNS ) -// RegClient is used to access OCI distribution-spec registries +// RegClient is used to access OCI distribution-spec registries. type RegClient struct { hosts map[string]*config.Host log *logrus.Logger @@ -40,10 +41,10 @@ type RegClient struct { fs rwfs.RWFS } -// Opt functions are used to configure NewRegClient +// Opt functions are used by [New] to create a [*RegClient]. type Opt func(*RegClient) -// New returns a registry client +// New returns a registry client. func New(opts ...Opt) *RegClient { var rc = RegClient{ hosts: map[string]*config.Host{}, @@ -63,7 +64,7 @@ func New(opts ...Opt) *RegClient { } // inject Docker Hub settings - rc.hostSet(*config.HostNewName(config.DockerRegistryAuth)) + _ = rc.hostSet(*config.HostNewName(config.DockerRegistryAuth)) for _, opt := range opts { opt(&rc) @@ -95,54 +96,54 @@ func New(opts ...Opt) *RegClient { return &rc } -// WithBlobLimit sets the max size for chunked blob uploads which get stored in memory +// WithBlobLimit sets the max size for chunked blob uploads which get stored in memory. 
// -// Deprecated: replace with WithRegOpts(reg.WithBlobLimit(limit)) +// Deprecated: replace with WithRegOpts(reg.WithBlobLimit(limit)), see [WithRegOpts] and [reg.WithBlobLimit]. func WithBlobLimit(limit int64) Opt { return func(rc *RegClient) { rc.regOpts = append(rc.regOpts, reg.WithBlobLimit(limit)) } } -// WithBlobSize overrides default blob sizes +// WithBlobSize overrides default blob sizes. // -// Deprecated: replace with WithRegOpts(reg.WithBlobSize(chunk, max)) +// Deprecated: replace with WithRegOpts(reg.WithBlobSize(chunk, max)), see [WithRegOpts] and [reg.WithBlobSize]. func WithBlobSize(chunk, max int64) Opt { return func(rc *RegClient) { rc.regOpts = append(rc.regOpts, reg.WithBlobSize(chunk, max)) } } -// WithCertDir adds a path of certificates to trust similar to Docker's /etc/docker/certs.d +// WithCertDir adds a path of certificates to trust similar to Docker's /etc/docker/certs.d. // -// Deprecated: replace with WithRegOpts(reg.WithCertDirs(path)) +// Deprecated: replace with WithRegOpts(reg.WithCertDirs(path)), see [WithRegOpts] and [reg.WithCertDirs]. func WithCertDir(path ...string) Opt { return func(rc *RegClient) { rc.regOpts = append(rc.regOpts, reg.WithCertDirs(path)) } } -// WithConfigHost adds a list of config host settings +// WithConfigHost adds a list of config host settings. func WithConfigHost(configHost ...config.Host) Opt { return func(rc *RegClient) { rc.hostLoad("host", configHost) } } -// WithConfigHosts adds a list of config host settings +// WithConfigHosts adds a list of config host settings. // -// Deprecated: replace with WithConfigHost +// Deprecated: replace with [WithConfigHost]. func WithConfigHosts(configHosts []config.Host) Opt { return WithConfigHost(configHosts...) } -// WithDockerCerts adds certificates trusted by docker in /etc/docker/certs.d +// WithDockerCerts adds certificates trusted by docker in /etc/docker/certs.d. func WithDockerCerts() Opt { return WithCertDir(DockerCertDir) } -// WithDockerCreds adds configuration from users docker config with registry logins -// This changes the default value from the config file, and should be added after the config file is loaded +// WithDockerCreds adds configuration from users docker config with registry logins. +// This changes the default value from the config file, and should be added after the config file is loaded. func WithDockerCreds() Opt { return func(rc *RegClient) { configHosts, err := config.DockerLoad() @@ -156,21 +157,21 @@ func WithDockerCreds() Opt { } } -// WithFS overrides the backing filesystem (used by ocidir) +// WithFS overrides the backing filesystem (used by ocidir). func WithFS(fs rwfs.RWFS) Opt { return func(rc *RegClient) { rc.fs = fs } } -// WithLog overrides default logrus Logger +// WithLog overrides default logrus Logger. func WithLog(log *logrus.Logger) Opt { return func(rc *RegClient) { rc.log = log } } -// WithRegOpts passes through opts to the reg scheme +// WithRegOpts passes through opts to the reg scheme. func WithRegOpts(opts ...reg.Opts) Opt { return func(rc *RegClient) { if len(opts) == 0 { @@ -180,25 +181,25 @@ func WithRegOpts(opts ...reg.Opts) Opt { } } -// WithRetryDelay specifies the time permitted for retry delays +// WithRetryDelay specifies the time permitted for retry delays. // -// Deprecated: replace with WithRegOpts(reg.WithDelay(delayInit, delayMax)) +// Deprecated: replace with WithRegOpts(reg.WithDelay(delayInit, delayMax)), see [WithRegOpts] and [reg.WithDelay]. 
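Each deprecated option above still works but simply appends the matching reg scheme option, so new code can pass those options through WithRegOpts directly. A sketch of the migration; the retry and delay values are arbitrary:

package main

import (
	"time"

	"github.com/regclient/regclient"
	"github.com/regclient/regclient/scheme/reg"
)

func main() {
	// before: regclient.New(regclient.WithRetryLimit(3), regclient.WithRetryDelay(time.Second, 30*time.Second))
	rc := regclient.New(
		regclient.WithUserAgent("example/1.0"), // placeholder user agent
		regclient.WithRegOpts(
			reg.WithRetryLimit(3),
			reg.WithDelay(time.Second, 30*time.Second),
		),
	)
	_ = rc
}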
func WithRetryDelay(delayInit, delayMax time.Duration) Opt { return func(rc *RegClient) { rc.regOpts = append(rc.regOpts, reg.WithDelay(delayInit, delayMax)) } } -// WithRetryLimit specifies the number of retries for non-fatal errors +// WithRetryLimit specifies the number of retries for non-fatal errors. // -// Deprecated: replace with WithRegOpts(reg.WithRetryLimit(retryLimit)) +// Deprecated: replace with WithRegOpts(reg.WithRetryLimit(retryLimit)), see [WithRegOpts] and [reg.WithRetryLimit]. func WithRetryLimit(retryLimit int) Opt { return func(rc *RegClient) { rc.regOpts = append(rc.regOpts, reg.WithRetryLimit(retryLimit)) } } -// WithUserAgent specifies the User-Agent http header +// WithUserAgent specifies the User-Agent http header. func WithUserAgent(ua string) Opt { return func(rc *RegClient) { rc.userAgent = ua diff --git a/vendor/github.com/regclient/regclient/release.md b/vendor/github.com/regclient/regclient/release.md index b65e5033c..760aace2d 100644 --- a/vendor/github.com/regclient/regclient/release.md +++ b/vendor/github.com/regclient/regclient/release.md @@ -1,51 +1,103 @@ -# Release v0.4.8 - -Breaking Changes: - -- Deprecated: `regclient.WithConfigHosts` is replaced by a variadic on `regclient.WithConfigHost` ([PR 409][pr-409]) -- Deprecated: `regclient.WithBlobLimit`, `regcleint.WithBlobSize`, `regclient.WithCertDir`, `regclient.WithRetryDelay`, and `regclient.WithRetryLimit` are replaced by `regclient.WithRegOpts` ([PR 409][pr-409]) - -New Features: - -- Add `--platform` option to `regctl image copy/export` ([PR 379][pr-379]) -- Add option to override name in `regctl image export` ([PR 380][pr-380]) -- Add platforms option to `regctl index add/create` ([PR 381][pr-381]) -- Add `--referrers` and `--digest-tags` options to `regctl index add/create` ([PR 382][pr-382]) -- Add `regctl blob copy` command ([PR 385][pr-385]) -- Adding `regctl image mod --to-docker` to convert manifests from OCI to Docker schema2 ([PR 388][pr-388]) -- Support `OCI-Chunk-Min-Length` header ([PR 394][pr-394]) -- Add support for registry warning headers ([PR 396][pr-396]) -- Add `regclient.WithRegOpts` ([PR 408][pr-408]) - -Bug Fixes: - -- Improve handling of the referrers API with Harbor ([PR 389][pr-389]) -- Fix an issue on `regctl tag rm` to support registries that require a layer ([PR 395][pr-395]) -- Image mod only converts `config.mediaType` between known values ([PR 399][pr-399]) -- Ignore anonymous blob mount failures ([PR 401][pr-401]) -- Fix handling of docker registry logins with `credStore` ([PR 405][pr-405]) -- Fix regsync handling of the paginated repo listing when syncing registries ([PR 406][pr-406]) - -Other Changes: - -- Recursively sign manifest list and platform specific images with cosign ([PR 378][pr-378]) -- Include tag in the version output ([PR 392][pr-392]) - -[pr-378]: https://github.com/regclient/regclient/pull/378 -[pr-379]: https://github.com/regclient/regclient/pull/379 -[pr-380]: https://github.com/regclient/regclient/pull/380 -[pr-381]: https://github.com/regclient/regclient/pull/381 -[pr-382]: https://github.com/regclient/regclient/pull/382 -[pr-385]: https://github.com/regclient/regclient/pull/385 -[pr-388]: https://github.com/regclient/regclient/pull/388 -[pr-389]: https://github.com/regclient/regclient/pull/389 -[pr-392]: https://github.com/regclient/regclient/pull/392 -[pr-394]: https://github.com/regclient/regclient/pull/394 -[pr-395]: https://github.com/regclient/regclient/pull/395 -[pr-396]: https://github.com/regclient/regclient/pull/396 -[pr-399]: 
https://github.com/regclient/regclient/pull/399 -[pr-401]: https://github.com/regclient/regclient/pull/401 -[pr-405]: https://github.com/regclient/regclient/pull/405 -[pr-406]: https://github.com/regclient/regclient/pull/406 -[pr-408]: https://github.com/regclient/regclient/pull/408 -[pr-409]: https://github.com/regclient/regclient/pull/409 +# Release v0.6.0 + +Breaking: + +- `regctl artifact put` no longer includes the filename annotation by default. Use `--file-title` to include. ([PR 659][pr-659]) +- Dropping Go 1.19 support ([PR 656][pr-656]) +- The platform string for windows images no longer includes the non-standard OS Version value. ([PR 685][pr-685]) + +Fixes: + +- Allow pushing artifacts without an artifactType value. ([PR 658][pr-658]) +- Image mod where created image is in a different repository ([PR 662][pr-662]) +- Improve returned errors from `regclient.ImageCopy`. ([PR 663][pr-663]) +- Cancel blob uploads on failures. ([PR 666][pr-666]) +- Allow ctrl-c on `regctl registry login` ([PR 671][pr-671]) +- Promoting annotations should ignore child manifests that have been removed from the tree. ([PR 675][pr-675]) +- Pin base image digest in build scripts to match Dockerfile pins. ([PR 678][pr-678]) +- Error wrapping fixed in several locations. ([PR 682][pr-682]) +- Platform selection now finds the best match rather than the first compatible match. ([PR 685][pr-685]) +- Update registry versions in CI tests. ([PR 687][pr-687]) +- Missing lines from diff context. ([PR 688][pr-688]) +- Replace `syft packages` with `syft scan`. ([PR 695][pr-695]) +- Image mod can manage the data file on the config descriptor of artifacts. ([PR 697][pr-697]) + +Features: + +- Adding Go 1.22 support ([PR 656][pr-656]) +- Add `BlobDelete` support for ocidir references. ([PR 669][pr-669]) +- Add `regctl blob delete` command. ([PR 669][pr-669]) +- Support formatting output on `regctl registry config`. ([PR 673][pr-673]) +- Add image mod ability to promote common annotations in the child images to the index. ([PR 674][pr-674]) +- Specifying windows OS Version now uses a comma separated syntax in the platform string. ([PR 685][pr-685]) +- Detect AMD64 variant when looking up local platform. ([PR 692][pr-692]) +- Add ability to set the config platform setting with `regctl image mod`. ([PR 693][pr-693]) +- Image mod support for setting the entrypoint and cmd. ([PR 694][pr-694]) + +Deprecations: + +- Errors in `types` are moved to the `errs` package. ([PR 686][pr-686]) +- MediaTypes in `types` are moved to the `mediatype` package. ([PR 686][pr-686]) +- Descriptor and associated variables in `types` are moved to the `descriptor` package. ([PR 686][pr-686]) +- `github.com/regclient/regclient/regclient` (3 levels of regclient) deprecations are now identified by the standard comment to trigger linters. ([PR 686][pr-686]) + +Other changes: + +- Update OSV scanner to monitor for unapproved licenses. ([PR 655][pr-655]) +- Include an API example in the Go docs. ([PR 657][pr-657]) +- Add examples to regctl help messages. ([PR 660][pr-660]) +- Include the Go Report Card badge. ([PR 664][pr-664]) +- Document the availability of the GitHub Actions installer for `regctl`. ([PR 665][pr-665]) +- Add examples to regctl help messages. ([PR 672][pr-672]) +- Redesign how annotations are added to the regclient images. ([PR 676][pr-676]) +- Remove uuid dependency from test code, replace with a random string generator. ([PR 677][pr-677]) +- Manage base image annotation with version-bump. 
([PR 679][pr-679]) +- Use `t.Fatal` where appropriate. ([PR 680][pr-680]) +- Remove wraperr package. ([PR 681][pr-681]) +- Add links to the GHA workflow badges. ([PR 683][pr-683]) +- Include a download count badge. ([PR 684][pr-684]) +- Refactoring `types` package to avoid circular dependency issues. ([PR 686][pr-686]) +- Cleanup unused parameters on private functions. ([PR 698][pr-698]) +- Resume push of SBOMs to Docker Hub. ([PR 701][pr-701]) + +Contributors: + +- @sudo-bmitch + +[pr-655]: https://github.com/regclient/regclient/pull/655 +[pr-656]: https://github.com/regclient/regclient/pull/656 +[pr-657]: https://github.com/regclient/regclient/pull/657 +[pr-658]: https://github.com/regclient/regclient/pull/658 +[pr-659]: https://github.com/regclient/regclient/pull/659 +[pr-660]: https://github.com/regclient/regclient/pull/660 +[pr-662]: https://github.com/regclient/regclient/pull/662 +[pr-663]: https://github.com/regclient/regclient/pull/663 +[pr-664]: https://github.com/regclient/regclient/pull/664 +[pr-665]: https://github.com/regclient/regclient/pull/665 +[pr-666]: https://github.com/regclient/regclient/pull/666 +[pr-669]: https://github.com/regclient/regclient/pull/669 +[pr-671]: https://github.com/regclient/regclient/pull/671 +[pr-672]: https://github.com/regclient/regclient/pull/672 +[pr-673]: https://github.com/regclient/regclient/pull/673 +[pr-674]: https://github.com/regclient/regclient/pull/674 +[pr-675]: https://github.com/regclient/regclient/pull/675 +[pr-676]: https://github.com/regclient/regclient/pull/676 +[pr-677]: https://github.com/regclient/regclient/pull/677 +[pr-678]: https://github.com/regclient/regclient/pull/678 +[pr-679]: https://github.com/regclient/regclient/pull/679 +[pr-680]: https://github.com/regclient/regclient/pull/680 +[pr-681]: https://github.com/regclient/regclient/pull/681 +[pr-682]: https://github.com/regclient/regclient/pull/682 +[pr-683]: https://github.com/regclient/regclient/pull/683 +[pr-684]: https://github.com/regclient/regclient/pull/684 +[pr-686]: https://github.com/regclient/regclient/pull/686 +[pr-685]: https://github.com/regclient/regclient/pull/685 +[pr-687]: https://github.com/regclient/regclient/pull/687 +[pr-688]: https://github.com/regclient/regclient/pull/688 +[pr-692]: https://github.com/regclient/regclient/pull/692 +[pr-693]: https://github.com/regclient/regclient/pull/693 +[pr-694]: https://github.com/regclient/regclient/pull/694 +[pr-695]: https://github.com/regclient/regclient/pull/695 +[pr-697]: https://github.com/regclient/regclient/pull/697 +[pr-698]: https://github.com/regclient/regclient/pull/698 +[pr-701]: https://github.com/regclient/regclient/pull/701 diff --git a/vendor/github.com/regclient/regclient/repo.go b/vendor/github.com/regclient/regclient/repo.go index 9a2cf80b3..c2a2d59f3 100644 --- a/vendor/github.com/regclient/regclient/repo.go +++ b/vendor/github.com/regclient/regclient/repo.go @@ -2,9 +2,11 @@ package regclient import ( "context" + "fmt" + "strings" "github.com/regclient/regclient/scheme" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/errs" "github.com/regclient/regclient/types/repo" ) @@ -12,17 +14,20 @@ type repoLister interface { RepoList(ctx context.Context, hostname string, opts ...scheme.RepoOpts) (*repo.RepoList, error) } -// RepoList returns a list of repositories on a registry -// Note the underlying "_catalog" API is not supported on many cloud registries +// RepoList returns a list of repositories on a registry. 
+// Note the underlying "_catalog" API is not supported on many cloud registries. func (rc *RegClient) RepoList(ctx context.Context, hostname string, opts ...scheme.RepoOpts) (*repo.RepoList, error) { + i := strings.Index(hostname, "/") + if i > 0 { + return nil, fmt.Errorf("invalid hostname: %s%.0w", hostname, errs.ErrParsingFailed) + } schemeAPI, err := rc.schemeGet("reg") if err != nil { return nil, err } rl, ok := schemeAPI.(repoLister) if !ok { - return nil, types.ErrNotImplemented + return nil, errs.ErrNotImplemented } return rl.RepoList(ctx, hostname, opts...) - } diff --git a/vendor/github.com/regclient/regclient/scheme.go b/vendor/github.com/regclient/regclient/scheme.go index 1fc0eeb07..a18a7af9d 100644 --- a/vendor/github.com/regclient/regclient/scheme.go +++ b/vendor/github.com/regclient/regclient/scheme.go @@ -5,28 +5,20 @@ import ( "fmt" "github.com/regclient/regclient/scheme" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/errs" "github.com/regclient/regclient/types/ref" ) func (rc *RegClient) schemeGet(scheme string) (scheme.API, error) { s, ok := rc.schemes[scheme] if !ok { - return nil, fmt.Errorf("%w: unknown scheme \"%s\"", types.ErrNotImplemented, scheme) + return nil, fmt.Errorf("%w: unknown scheme \"%s\"", errs.ErrNotImplemented, scheme) } return s, nil } -func (rc *RegClient) schemeInfo(r ref.Ref) (scheme.Info, error) { - schemeAPI, err := rc.schemeGet(r.Scheme) - if err != nil { - return scheme.Info{}, err - } - return schemeAPI.Info(), nil -} - -// Close is used to free resources associated with a reference -// With ocidir, this may trigger a garbage collection process +// Close is used to free resources associated with a reference. +// With ocidir, this may trigger a garbage collection process. func (rc *RegClient) Close(ctx context.Context, r ref.Ref) error { schemeAPI, err := rc.schemeGet(r.Scheme) if err != nil { diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/blob.go b/vendor/github.com/regclient/regclient/scheme/ocidir/blob.go index b609811d9..785e2de7d 100644 --- a/vendor/github.com/regclient/regclient/scheme/ocidir/blob.go +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/blob.go @@ -1,7 +1,6 @@ package ocidir import ( - "bytes" "context" "errors" "fmt" @@ -14,20 +13,25 @@ import ( _ "crypto/sha512" "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "github.com/regclient/regclient/internal/rwfs" - "github.com/regclient/regclient/types" "github.com/regclient/regclient/types/blob" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" "github.com/regclient/regclient/types/ref" - "github.com/sirupsen/logrus" ) -// BlobDelete removes a blob from the repository -func (o *OCIDir) BlobDelete(ctx context.Context, r ref.Ref, d types.Descriptor) error { - return types.ErrNotImplemented +// BlobDelete removes a blob from the repository. +// This method does not verify that blobs are unused. +// Calling the [OCIDir.Close] method to trigger the garbage collection is preferred. 
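The added guard in RepoList fails fast when a repository path is passed where only a registry hostname is accepted, returning errs.ErrParsingFailed before any request is made. A sketch; the hostnames are placeholders, and the Repositories field follows the types/repo package:

package main

import (
	"context"
	"fmt"

	"github.com/regclient/regclient"
)

func main() {
	ctx := context.Background()
	rc := regclient.New()
	// a repository path is rejected before any request is made
	if _, err := rc.RepoList(ctx, "registry.example.com/repo"); err != nil {
		fmt.Println("rejected:", err)
	}
	// a bare registry hostname is the supported form
	rl, err := rc.RepoList(ctx, "registry.example.com") // placeholder host
	if err != nil {
		fmt.Println("list:", err)
		return
	}
	fmt.Println(rl.Repositories)
}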
+func (o *OCIDir) BlobDelete(ctx context.Context, r ref.Ref, d descriptor.Descriptor) error { + file := path.Join(r.Path, "blobs", d.Digest.Algorithm().String(), d.Digest.Encoded()) + return o.fs.Remove(file) } // BlobGet retrieves a blob, returning a reader -func (o *OCIDir) BlobGet(ctx context.Context, r ref.Ref, d types.Descriptor) (blob.Reader, error) { +func (o *OCIDir) BlobGet(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { file := path.Join(r.Path, "blobs", d.Digest.Algorithm().String(), d.Digest.Encoded()) fd, err := o.fs.Open(file) if err != nil { @@ -36,7 +40,7 @@ func (o *OCIDir) BlobGet(ctx context.Context, r ref.Ref, d types.Descriptor) (bl if d.Size <= 0 { fi, err := fd.Stat() if err != nil { - fd.Close() + _ = fd.Close() return nil, err } d.Size = fi.Size() @@ -54,7 +58,7 @@ func (o *OCIDir) BlobGet(ctx context.Context, r ref.Ref, d types.Descriptor) (bl } // BlobHead verifies the existence of a blob, the reader contains the headers but no body to read -func (o *OCIDir) BlobHead(ctx context.Context, r ref.Ref, d types.Descriptor) (blob.Reader, error) { +func (o *OCIDir) BlobHead(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { file := path.Join(r.Path, "blobs", d.Digest.Algorithm().String(), d.Digest.Encoded()) fd, err := o.fs.Open(file) if err != nil { @@ -76,61 +80,78 @@ func (o *OCIDir) BlobHead(ctx context.Context, r ref.Ref, d types.Descriptor) (b } // BlobMount attempts to perform a server side copy of the blob -func (o *OCIDir) BlobMount(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d types.Descriptor) error { - return types.ErrUnsupported +func (o *OCIDir) BlobMount(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor) error { + return errs.ErrUnsupported } // BlobPut sends a blob to the repository, returns the digest and size when successful -func (o *OCIDir) BlobPut(ctx context.Context, r ref.Ref, d types.Descriptor, rdr io.Reader) (types.Descriptor, error) { +func (o *OCIDir) BlobPut(ctx context.Context, r ref.Ref, d descriptor.Descriptor, rdr io.Reader) (descriptor.Descriptor, error) { + t := o.throttleGet(r, false) + err := t.Acquire(ctx) + if err != nil { + return d, err + } + defer t.Release(ctx) + + err = o.initIndex(r, false) + if err != nil { + return d, err + } digester := digest.Canonical.Digester() rdr = io.TeeReader(rdr, digester.Hash()) - // if digest unavailable, read into a []byte+digest, and replace rdr - if d.Digest == "" || d.Size <= 0 { - b, err := io.ReadAll(rdr) - if err != nil { - return d, err - } - if d.Digest == "" { - d.Digest = digester.Digest() - } else if d.Digest != digester.Digest() { - return d, fmt.Errorf("unexpected digest, expected %s, computed %s", d.Digest, digester.Digest()) - } - if d.Size <= 0 { - d.Size = int64(len(b)) - } else if d.Size != int64(len(b)) { - return d, fmt.Errorf("unexpected blob length, expected %d, received %d", d.Size, int64(len(b))) - } - rdr = bytes.NewReader(b) - digester = nil // no need to recompute or validate digest + // write the blob to a tmp file + var dir, tmpPattern string + if d.Digest != "" && d.Size > 0 { + dir = path.Join(r.Path, "blobs", d.Digest.Algorithm().String()) + tmpPattern = d.Digest.Encoded() + ".*.tmp" + } else { + dir = path.Join(r.Path, "blobs", digest.Canonical.String()) + tmpPattern = "*.tmp" } - // write the blob to the CAS file - dir := path.Join(r.Path, "blobs", d.Digest.Algorithm().String()) - err := rwfs.MkdirAll(o.fs, dir, 0777) + err = rwfs.MkdirAll(o.fs, dir, 0777) if err != 
nil && !errors.Is(err, fs.ErrExist) { return d, fmt.Errorf("failed creating %s: %w", dir, err) } - file := path.Join(r.Path, "blobs", d.Digest.Algorithm().String(), d.Digest.Encoded()) - fd, err := o.fs.Create(file) + tmpFile, err := rwfs.CreateTemp(o.fs, dir, tmpPattern) if err != nil { - return d, fmt.Errorf("failed creating %s: %w", file, err) + return d, fmt.Errorf("failed creating blob tmp file: %w", err) } - defer fd.Close() - i, err := io.Copy(fd, rdr) + fi, err := tmpFile.Stat() + if err != nil { + return d, fmt.Errorf("failed to stat blob tmpfile: %w", err) + } + tmpName := fi.Name() + i, err := io.Copy(tmpFile, rdr) + errC := tmpFile.Close() if err != nil { return d, err } - // validate result - if digester != nil && d.Digest != digester.Digest() { + if errC != nil { + return d, errC + } + // validate result matches descriptor, or update descriptor if it wasn't defined + if d.Digest == "" || d.Size <= 0 { + d.Digest = digester.Digest() + } else if d.Digest != digester.Digest() { return d, fmt.Errorf("unexpected digest, expected %s, computed %s", d.Digest, digester.Digest()) } - if d.Size > 0 && i != d.Size { + if d.Size <= 0 { + d.Size = i + } else if i != d.Size { return d, fmt.Errorf("unexpected blob length, expected %d, received %d", d.Size, i) } - d.Size = i + file := path.Join(r.Path, "blobs", d.Digest.Algorithm().String(), d.Digest.Encoded()) + err = o.fs.Rename(path.Join(dir, tmpName), file) + if err != nil { + return d, fmt.Errorf("failed to write blob (rename tmp file %s to %s): %w", path.Join(dir, tmpName), file, err) + } o.log.WithFields(logrus.Fields{ "ref": r.CommonName(), "file": file, }).Debug("pushed blob") + + o.mu.Lock() o.refMod(r) + o.mu.Unlock() return d, nil } diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/close.go b/vendor/github.com/regclient/regclient/scheme/ocidir/close.go index 88f31fdaa..2f36154f5 100644 --- a/vendor/github.com/regclient/regclient/scheme/ocidir/close.go +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/close.go @@ -6,9 +6,10 @@ import ( "io/fs" "path" + "github.com/sirupsen/logrus" + "github.com/regclient/regclient/types/manifest" "github.com/regclient/regclient/types/ref" - "github.com/sirupsen/logrus" ) // Close triggers a garbage collection if the underlying path has been modified @@ -19,8 +20,8 @@ func (o *OCIDir) Close(ctx context.Context, r ref.Ref) error { o.mu.Lock() defer o.mu.Unlock() - if _, ok := o.modRefs[r.Path]; !ok { - // unmodified, no need to gc ref + if gc, ok := o.modRefs[r.Path]; !ok || !gc.mod || gc.locks > 0 { + // unmodified or locked, skip gc return nil } @@ -30,7 +31,7 @@ func (o *OCIDir) Close(ctx context.Context, r ref.Ref) error { }).Debug("running GC") dl := map[string]bool{} // recurse through index, manifests, and blob lists, generating a digest list - index, err := o.readIndex(r) + index, err := o.readIndex(r, true) if err != nil { return err } @@ -65,7 +66,10 @@ func (o *OCIDir) Close(ctx context.Context, r ref.Ref) error { "digest": digest, }).Debug("ocidir garbage collect") // delete - o.fs.Remove(path.Join(blobsPath, blobDir.Name(), digestFile.Name())) + err = o.fs.Remove(path.Join(blobsPath, blobDir.Name(), digestFile.Name())) + if err != nil { + return fmt.Errorf("failed to delete %s: %w", path.Join(blobsPath, blobDir.Name(), digestFile.Name()), err) + } } } } @@ -81,11 +85,9 @@ func (o *OCIDir) closeProcManifest(ctx context.Context, r ref.Ref, m manifest.Ma return err } for _, cur := range ml { - cr, _ := ref.New(r.CommonName()) - cr.Tag = "" - cr.Digest = 
cur.Digest.String() + cr := r.SetDigest(cur.Digest.String()) (*dl)[cr.Digest] = true - cm, err := o.ManifestGet(ctx, cr) + cm, err := o.manifestGet(ctx, cr) if err != nil { // ignore errors in case a manifest has been deleted or sparse copy o.log.WithFields(logrus.Fields{ diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/manifest.go b/vendor/github.com/regclient/regclient/scheme/ocidir/manifest.go index 1406d44c4..3457c6d61 100644 --- a/vendor/github.com/regclient/regclient/scheme/ocidir/manifest.go +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/manifest.go @@ -14,19 +14,23 @@ import ( _ "crypto/sha512" "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "github.com/regclient/regclient/internal/rwfs" - "github.com/regclient/regclient/internal/wraperr" "github.com/regclient/regclient/scheme" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/errs" "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/mediatype" "github.com/regclient/regclient/types/ref" - "github.com/sirupsen/logrus" ) // ManifestDelete removes a manifest, including all tags that point to that manifest func (o *OCIDir) ManifestDelete(ctx context.Context, r ref.Ref, opts ...scheme.ManifestOpts) error { + o.mu.Lock() + defer o.mu.Unlock() + if r.Digest == "" { - return wraperr.New(fmt.Errorf("digest required to delete manifest, reference %s", r.CommonName()), types.ErrMissingDigest) + return fmt.Errorf("digest required to delete manifest, reference %s%.0w", r.CommonName(), errs.ErrMissingDigest) } mc := scheme.ManifestConfig{} @@ -36,7 +40,7 @@ func (o *OCIDir) ManifestDelete(ctx context.Context, r ref.Ref, opts ...scheme.M // always check for refers with ocidir if mc.Manifest == nil { - m, err := o.ManifestGet(ctx, r) + m, err := o.manifestGet(ctx, r) if err != nil { return fmt.Errorf("failed to pull manifest for refers: %w", err) } @@ -48,7 +52,7 @@ func (o *OCIDir) ManifestDelete(ctx context.Context, r ref.Ref, opts ...scheme.M if err == nil && sDesc != nil && sDesc.MediaType != "" && sDesc.Size > 0 { // attempt to delete the referrer, but ignore if the referrer entry wasn't found err = o.referrerDelete(ctx, r, mc.Manifest) - if err != nil && !errors.Is(err, types.ErrNotFound) { + if err != nil && !errors.Is(err, errs.ErrNotFound) && !errors.Is(err, fs.ErrNotExist) { return err } } @@ -57,7 +61,7 @@ func (o *OCIDir) ManifestDelete(ctx context.Context, r ref.Ref, opts ...scheme.M // get index changed := false - index, err := o.readIndex(r) + index, err := o.readIndex(r, true) if err != nil { return fmt.Errorf("failed to read index: %w", err) } @@ -70,7 +74,7 @@ func (o *OCIDir) ManifestDelete(ctx context.Context, r ref.Ref, opts ...scheme.M } // push manifest back out if changed { - err = o.writeIndex(r, index) + err = o.writeIndex(r, index, true) if err != nil { return fmt.Errorf("failed to write index: %w", err) } @@ -89,7 +93,13 @@ func (o *OCIDir) ManifestDelete(ctx context.Context, r ref.Ref, opts ...scheme.M // ManifestGet retrieves a manifest from a repository func (o *OCIDir) ManifestGet(ctx context.Context, r ref.Ref) (manifest.Manifest, error) { - index, err := o.readIndex(r) + o.mu.Lock() + defer o.mu.Unlock() + return o.manifestGet(ctx, r) +} + +func (o *OCIDir) manifestGet(_ context.Context, r ref.Ref) (manifest.Manifest, error) { + index, err := o.readIndex(r, true) if err != nil { return nil, fmt.Errorf("unable to read oci index: %w", err) } @@ -105,7 +115,7 @@ func (o *OCIDir) ManifestGet(ctx 
context.Context, r ref.Ref) (manifest.Manifest, } } if desc.Digest == "" { - return nil, types.ErrNotFound + return nil, errs.ErrNotFound } file := path.Join(r.Path, "blobs", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) fd, err := o.fs.Open(file) @@ -133,7 +143,7 @@ func (o *OCIDir) ManifestGet(ctx context.Context, r ref.Ref) (manifest.Manifest, // ManifestHead gets metadata about the manifest (existence, digest, mediatype, size) func (o *OCIDir) ManifestHead(ctx context.Context, r ref.Ref) (manifest.Manifest, error) { - index, err := o.readIndex(r) + index, err := o.readIndex(r, false) if err != nil { return nil, fmt.Errorf("unable to read oci index: %w", err) } @@ -149,13 +159,13 @@ func (o *OCIDir) ManifestHead(ctx context.Context, r ref.Ref) (manifest.Manifest } } if desc.Digest == "" { - return nil, types.ErrNotFound + return nil, errs.ErrNotFound } // verify underlying file exists file := path.Join(r.Path, "blobs", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) fi, err := rwfs.Stat(o.fs, file) if err != nil || fi.IsDir() { - return nil, types.ErrNotFound + return nil, errs.ErrNotFound } // if missing, set media type on desc if desc.MediaType == "" { @@ -176,9 +186,9 @@ func (o *OCIDir) ManifestHead(ctx context.Context, r ref.Ref) (manifest.Manifest desc.MediaType = mt.MediaType desc.Size = int64(len(raw)) } else if mt.SchemaVersion == 1 && len(mt.Signatures) > 0 { - desc.MediaType = types.MediaTypeDocker1ManifestSigned + desc.MediaType = mediatype.Docker1ManifestSigned } else if mt.SchemaVersion == 1 { - desc.MediaType = types.MediaTypeDocker1Manifest + desc.MediaType = mediatype.Docker1Manifest desc.Size = int64(len(raw)) } } @@ -190,6 +200,12 @@ func (o *OCIDir) ManifestHead(ctx context.Context, r ref.Ref) (manifest.Manifest // ManifestPut sends a manifest to the repository func (o *OCIDir) ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, opts ...scheme.ManifestOpts) error { + o.mu.Lock() + defer o.mu.Unlock() + return o.manifestPut(ctx, r, m, opts...) 
+} + +func (o *OCIDir) manifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, opts ...scheme.ManifestOpts) error { config := scheme.ManifestConfig{} for _, opt := range opts { opt(&config) @@ -197,12 +213,9 @@ func (o *OCIDir) ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest if !config.Child && r.Digest == "" && r.Tag == "" { r.Tag = "latest" } - - indexChanged := false - index, err := o.readIndex(r) + err := o.initIndex(r, true) if err != nil { - index = indexCreate() - indexChanged = true + return err } desc := m.GetDescriptor() b, err := m.RawBody() @@ -224,30 +237,34 @@ func (o *OCIDir) ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest if err != nil && !errors.Is(err, fs.ErrExist) { return fmt.Errorf("failed creating %s: %w", dir, err) } - file := path.Join(dir, desc.Digest.Encoded()) - fd, err := o.fs.Create(file) + // write to a tmp file, rename after validating + tmpFile, err := rwfs.CreateTemp(o.fs, dir, desc.Digest.Encoded()+".*.tmp") if err != nil { - return fmt.Errorf("failed to create manifest: %w", err) + return fmt.Errorf("failed to create manifest tmpfile: %w", err) } - defer fd.Close() - _, err = fd.Write(b) + fi, err := tmpFile.Stat() if err != nil { - return fmt.Errorf("failed to write manifest: %w", err) + return fmt.Errorf("failed to stat manifest tmpfile: %w", err) } - // replace existing tag or create a new entry - if !config.Child { - err := indexSet(&index, r, desc) - if err != nil { - return fmt.Errorf("failed to update index: %w", err) - } - indexChanged = true + tmpName := fi.Name() + _, err = tmpFile.Write(b) + errC := tmpFile.Close() + if err != nil { + return fmt.Errorf("failed to write manifest tmpfile: %w", err) } - // write the index.json and oci-layout if it's been changed - if indexChanged { - err = o.writeIndex(r, index) - if err != nil { - return fmt.Errorf("failed to write index: %w", err) - } + if errC != nil { + return fmt.Errorf("failed to close manifest tmpfile: %w", errC) + } + file := path.Join(dir, desc.Digest.Encoded()) + err = o.fs.Rename(path.Join(dir, tmpName), file) + if err != nil { + return fmt.Errorf("failed to write manifest (rename tmpfile): %w", err) + } + + // verify/update index + err = o.updateIndex(r, desc, config.Child, true) + if err != nil { + return err } o.refMod(r) o.log.WithFields(logrus.Fields{ @@ -268,6 +285,5 @@ func (o *OCIDir) ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest } } } - return nil } diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/ocidir.go b/vendor/github.com/regclient/regclient/scheme/ocidir/ocidir.go index 6ce47fe19..7ea731565 100644 --- a/vendor/github.com/regclient/regclient/scheme/ocidir/ocidir.go +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/ocidir.go @@ -11,33 +11,45 @@ import ( "strings" "sync" + "github.com/sirupsen/logrus" + "github.com/regclient/regclient/internal/rwfs" - "github.com/regclient/regclient/scheme" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/internal/throttle" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" v1 "github.com/regclient/regclient/types/oci/v1" "github.com/regclient/regclient/types/ref" - "github.com/sirupsen/logrus" ) const ( imageLayoutFile = "oci-layout" aOCIRefName = "org.opencontainers.image.ref.name" aCtrdImageName = "io.containerd.image.name" + defThrottle = 3 ) // OCIDir is used for accessing OCI Image Layouts defined as a directory type OCIDir struct { 
- fs rwfs.RWFS - log *logrus.Logger - gc bool - modRefs map[string]ref.Ref - mu sync.Mutex + fs rwfs.RWFS + log *logrus.Logger + gc bool + modRefs map[string]*ociGC + throttle map[string]*throttle.Throttle + throttleDef int + mu sync.Mutex +} + +type ociGC struct { + mod bool + locks int } type ociConf struct { - fs rwfs.RWFS - gc bool - log *logrus.Logger + fs rwfs.RWFS + gc bool + log *logrus.Logger + throttle int } // Opts are used for passing options to ocidir @@ -46,17 +58,20 @@ type Opts func(*ociConf) // New creates a new OCIDir with options func New(opts ...Opts) *OCIDir { conf := ociConf{ - log: &logrus.Logger{Out: io.Discard}, - gc: true, + log: &logrus.Logger{Out: io.Discard}, + gc: true, + throttle: defThrottle, } for _, opt := range opts { opt(&conf) } return &OCIDir{ - fs: conf.fs, - log: conf.log, - gc: conf.gc, - modRefs: map[string]ref.Ref{}, + fs: conf.fs, + log: conf.log, + gc: conf.gc, + modRefs: map[string]*ociGC{}, + throttle: map[string]*throttle.Throttle{}, + throttleDef: conf.throttle, } } @@ -85,15 +100,98 @@ func WithLog(log *logrus.Logger) Opts { } } -// Info is experimental, do not use -func (o *OCIDir) Info() scheme.Info { - return scheme.Info{ManifestPushFirst: true} +// WithThrottle provides a number of concurrent write actions (blob/manifest put) +func WithThrottle(count int) Opts { + return func(c *ociConf) { + c.throttle = count + } +} + +// GCLock is used to prevent GC on a ref +func (o *OCIDir) GCLock(r ref.Ref) { + o.mu.Lock() + defer o.mu.Unlock() + if gc, ok := o.modRefs[r.Path]; ok && gc != nil { + gc.locks++ + } else { + o.modRefs[r.Path] = &ociGC{locks: 1} + } +} + +// GCUnlock removes a hold on GC of a ref, this must be done before the ref is closed +func (o *OCIDir) GCUnlock(r ref.Ref) { + o.mu.Lock() + defer o.mu.Unlock() + if gc, ok := o.modRefs[r.Path]; ok && gc != nil && gc.locks > 0 { + gc.locks-- + } +} + +// Throttle is used to limit concurrency +func (o *OCIDir) Throttle(r ref.Ref, put bool) []*throttle.Throttle { + tList := []*throttle.Throttle{} + // throttle only applies to put requests + if !put || o.throttleDef <= 0 { + return tList + } + return []*throttle.Throttle{o.throttleGet(r, false)} +} + +func (o *OCIDir) throttleGet(r ref.Ref, locked bool) *throttle.Throttle { + if !locked { + o.mu.Lock() + defer o.mu.Unlock() + } + if t, ok := o.throttle[r.Path]; ok { + return t + } + // init a new throttle + o.throttle[r.Path] = throttle.New(o.throttleDef) + return o.throttle[r.Path] +} + +func (o *OCIDir) initIndex(r ref.Ref, locked bool) error { + if !locked { + o.mu.Lock() + defer o.mu.Unlock() + } + layoutFile := path.Join(r.Path, imageLayoutFile) + _, err := rwfs.Stat(o.fs, layoutFile) + if err == nil { + return nil + } + err = rwfs.MkdirAll(o.fs, r.Path, 0777) + if err != nil && !errors.Is(err, fs.ErrExist) { + return fmt.Errorf("failed creating %s: %w", r.Path, err) + } + // create/replace oci-layout file + layout := v1.ImageLayout{ + Version: "1.0.0", + } + lb, err := json.Marshal(layout) + if err != nil { + return fmt.Errorf("cannot marshal layout: %w", err) + } + lfh, err := o.fs.Create(layoutFile) + if err != nil { + return fmt.Errorf("cannot create %s: %w", imageLayoutFile, err) + } + defer lfh.Close() + _, err = lfh.Write(lb) + if err != nil { + return fmt.Errorf("cannot write %s: %w", imageLayoutFile, err) + } + return nil } -func (o *OCIDir) readIndex(r ref.Ref) (v1.Index, error) { +func (o *OCIDir) readIndex(r ref.Ref, locked bool) (v1.Index, error) { + if !locked { + o.mu.Lock() + defer o.mu.Unlock() + } // validate dir 
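+	// (o.valid is called with locked=true because the mutex is already held
+	// at this point, whether taken above or by the caller)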
index := v1.Index{} - err := o.valid(r.Path) + err := o.valid(r.Path, true) if err != nil { return index, err } @@ -114,7 +212,38 @@ func (o *OCIDir) readIndex(r ref.Ref) (v1.Index, error) { return index, nil } -func (o *OCIDir) writeIndex(r ref.Ref, i v1.Index) error { +func (o *OCIDir) updateIndex(r ref.Ref, d descriptor.Descriptor, child bool, locked bool) error { + if !locked { + o.mu.Lock() + defer o.mu.Unlock() + } + indexChanged := false + index, err := o.readIndex(r, true) + if err != nil { + index = indexCreate() + indexChanged = true + } + if !child { + err := indexSet(&index, r, d) + if err != nil { + return fmt.Errorf("failed to update index: %w", err) + } + indexChanged = true + } + if indexChanged { + err = o.writeIndex(r, index, true) + if err != nil { + return fmt.Errorf("failed to write index: %w", err) + } + } + return nil +} + +func (o *OCIDir) writeIndex(r ref.Ref, i v1.Index, locked bool) error { + if !locked { + o.mu.Lock() + defer o.mu.Unlock() + } err := rwfs.MkdirAll(o.fs, r.Path, 0777) if err != nil && !errors.Is(err, fs.ErrExist) { return fmt.Errorf("failed creating %s: %w", r.Path, err) @@ -137,25 +266,41 @@ func (o *OCIDir) writeIndex(r ref.Ref, i v1.Index) error { return fmt.Errorf("cannot write %s: %w", imageLayoutFile, err) } // create/replace index.json file - indexFile := path.Join(r.Path, "index.json") - fh, err := o.fs.Create(indexFile) + tmpFile, err := rwfs.CreateTemp(o.fs, r.Path, "index.json.*.tmp") if err != nil { - return fmt.Errorf("%s cannot be created: %w", indexFile, err) + return fmt.Errorf("cannot create index tmpfile: %w", err) } - defer fh.Close() + fi, err := tmpFile.Stat() + if err != nil { + return fmt.Errorf("failed to stat index tmpfile: %w", err) + } + tmpName := fi.Name() b, err := json.Marshal(i) if err != nil { return fmt.Errorf("cannot marshal index: %w", err) } - _, err = fh.Write(b) + _, err = tmpFile.Write(b) + errC := tmpFile.Close() if err != nil { return fmt.Errorf("cannot write index: %w", err) } + if errC != nil { + return fmt.Errorf("cannot close index: %w", errC) + } + indexFile := path.Join(r.Path, "index.json") + err = o.fs.Rename(path.Join(r.Path, tmpName), indexFile) + if err != nil { + return fmt.Errorf("cannot rename tmpfile to index: %w", err) + } return nil } // func valid (dir) (error) // check for `oci-layout` file and `index.json` for read -func (o *OCIDir) valid(dir string) error { +func (o *OCIDir) valid(dir string, locked bool) error { + if !locked { + o.mu.Lock() + defer o.mu.Unlock() + } layout := v1.ImageLayout{} reqVer := "1.0.0" fh, err := o.fs.Open(path.Join(dir, imageLayoutFile)) @@ -178,22 +323,24 @@ func (o *OCIDir) valid(dir string) error { } func (o *OCIDir) refMod(r ref.Ref) { - o.mu.Lock() - defer o.mu.Unlock() - o.modRefs[r.Path] = r + if gc, ok := o.modRefs[r.Path]; ok && gc != nil { + gc.mod = true + } else { + o.modRefs[r.Path] = &ociGC{mod: true} + } } func indexCreate() v1.Index { i := v1.Index{ Versioned: v1.IndexSchemaVersion, - MediaType: types.MediaTypeOCI1ManifestList, - Manifests: []types.Descriptor{}, + MediaType: mediatype.OCI1ManifestList, + Manifests: []descriptor.Descriptor{}, Annotations: map[string]string{}, } return i } -func indexGet(index v1.Index, r ref.Ref) (types.Descriptor, error) { +func indexGet(index v1.Index, r ref.Ref) (descriptor.Descriptor, error) { if r.Digest == "" && r.Tag == "" { r.Tag = "latest" } @@ -216,10 +363,10 @@ func indexGet(index v1.Index, r ref.Ref) (types.Descriptor, error) { } } } - return types.Descriptor{}, types.ErrNotFound + return 
descriptor.Descriptor{}, errs.ErrNotFound } -func indexSet(index *v1.Index, r ref.Ref, d types.Descriptor) error { +func indexSet(index *v1.Index, r ref.Ref, d descriptor.Descriptor) error { if index == nil { return fmt.Errorf("index is nil") } @@ -230,7 +377,7 @@ func indexSet(index *v1.Index, r ref.Ref, d types.Descriptor) error { d.Annotations[aOCIRefName] = r.Tag } if index.Manifests == nil { - index.Manifests = []types.Descriptor{} + index.Manifests = []descriptor.Descriptor{} } pos := -1 // search for existing diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/ping.go b/vendor/github.com/regclient/regclient/scheme/ocidir/ping.go new file mode 100644 index 000000000..a3da3ae7f --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/ping.go @@ -0,0 +1,28 @@ +package ocidir + +import ( + "context" + "fmt" + + "github.com/regclient/regclient/types/ping" + "github.com/regclient/regclient/types/ref" +) + +// Ping for an ocidir verifies access to read the path. +func (o *OCIDir) Ping(ctx context.Context, r ref.Ref) (ping.Result, error) { + ret := ping.Result{} + fd, err := o.fs.Open(r.Path) + if err != nil { + return ret, err + } + defer fd.Close() + fi, err := fd.Stat() + if err != nil { + return ret, err + } + ret.Stat = fi + if !fi.IsDir() { + return ret, fmt.Errorf("failed to access %s: not a directory", r.Path) + } + return ret, nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/referrer.go b/vendor/github.com/regclient/regclient/scheme/ocidir/referrer.go index 4471c8717..a1cfe4227 100644 --- a/vendor/github.com/regclient/regclient/scheme/ocidir/referrer.go +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/referrer.go @@ -6,8 +6,9 @@ import ( "fmt" "github.com/regclient/regclient/scheme" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/errs" "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/mediatype" v1 "github.com/regclient/regclient/types/oci/v1" "github.com/regclient/regclient/types/platform" "github.com/regclient/regclient/types/ref" @@ -16,6 +17,12 @@ import ( // ReferrerList returns a list of referrers to a given reference func (o *OCIDir) ReferrerList(ctx context.Context, r ref.Ref, opts ...scheme.ReferrerOpts) (referrer.ReferrerList, error) { + o.mu.Lock() + defer o.mu.Unlock() + return o.referrerList(ctx, r, opts...) 
+} + +func (o *OCIDir) referrerList(ctx context.Context, r ref.Ref, opts ...scheme.ReferrerOpts) (referrer.ReferrerList, error) { config := scheme.ReferrerConfig{} for _, opt := range opts { opt(&config) @@ -26,7 +33,7 @@ func (o *OCIDir) ReferrerList(ctx context.Context, r ref.Ref, opts ...scheme.Ref } // select a platform from a manifest list if config.Platform != "" { - m, err := o.ManifestGet(ctx, r) + m, err := o.manifestGet(ctx, r) if err != nil { return rl, err } @@ -46,7 +53,7 @@ func (o *OCIDir) ReferrerList(ctx context.Context, r ref.Ref, opts ...scheme.Ref } // if ref is a tag, run a head request for the digest if r.Digest == "" { - m, err := o.ManifestHead(ctx, r) + m, err := o.manifestGet(ctx, r) if err != nil { return rl, err } @@ -58,13 +65,13 @@ func (o *OCIDir) ReferrerList(ctx context.Context, r ref.Ref, opts ...scheme.Ref if err != nil { return rl, err } - m, err := o.ManifestGet(ctx, rlTag) + m, err := o.manifestGet(ctx, rlTag) if err != nil { - if errors.Is(err, types.ErrNotFound) { + if errors.Is(err, errs.ErrNotFound) { // empty list, initialize a new manifest rl.Manifest, err = manifest.New(manifest.WithOrig(v1.Index{ Versioned: v1.IndexSchemaVersion, - MediaType: types.MediaTypeOCI1ManifestList, + MediaType: mediatype.OCI1ManifestList, })) if err != nil { return rl, err @@ -92,7 +99,7 @@ func (o *OCIDir) referrerDelete(ctx context.Context, r ref.Ref, m manifest.Manif // get refers field mSubject, ok := m.(manifest.Subjecter) if !ok { - return fmt.Errorf("manifest does not support subject: %w", types.ErrUnsupportedMediaType) + return fmt.Errorf("manifest does not support subject: %w", errs.ErrUnsupportedMediaType) } subject, err := mSubject.GetSubject() if err != nil { @@ -100,16 +107,14 @@ func (o *OCIDir) referrerDelete(ctx context.Context, r ref.Ref, m manifest.Manif } // validate/set subject descriptor if subject == nil || subject.MediaType == "" || subject.Digest == "" || subject.Size <= 0 { - return fmt.Errorf("subject is not set%.0w", types.ErrNotFound) + return fmt.Errorf("subject is not set%.0w", errs.ErrNotFound) } // get descriptor for subject - rSubject := r - rSubject.Tag = "" - rSubject.Digest = subject.Digest.String() + rSubject := r.SetDigest(subject.Digest.String()) // pull existing referrer list - rl, err := o.ReferrerList(ctx, rSubject) + rl, err := o.referrerList(ctx, rSubject) if err != nil { return err } @@ -124,13 +129,13 @@ func (o *OCIDir) referrerDelete(ctx context.Context, r ref.Ref, m manifest.Manif return err } if rl.IsEmpty() { - err = o.TagDelete(ctx, rlTag) + err = o.tagDelete(ctx, rlTag) if err == nil { return nil } // if delete is not supported, fall back to pushing empty list } - return o.ManifestPut(ctx, rlTag, rl.Manifest) + return o.manifestPut(ctx, rlTag, rl.Manifest) } // referrerPut pushes a new referrer associated with a given reference @@ -138,7 +143,7 @@ func (o *OCIDir) referrerPut(ctx context.Context, r ref.Ref, m manifest.Manifest // get subject field mSubject, ok := m.(manifest.Subjecter) if !ok { - return fmt.Errorf("manifest does not support subject: %w", types.ErrUnsupportedMediaType) + return fmt.Errorf("manifest does not support subject: %w", errs.ErrUnsupportedMediaType) } subject, err := mSubject.GetSubject() if err != nil { @@ -146,16 +151,14 @@ func (o *OCIDir) referrerPut(ctx context.Context, r ref.Ref, m manifest.Manifest } // validate/set subject descriptor if subject == nil || subject.MediaType == "" || subject.Digest == "" || subject.Size <= 0 { - return fmt.Errorf("subject is not set%.0w", 
types.ErrNotFound) + return fmt.Errorf("subject is not set%.0w", errs.ErrNotFound) } // get descriptor for subject - rSubject := r - rSubject.Tag = "" - rSubject.Digest = subject.Digest.String() + rSubject := r.SetDigest(subject.Digest.String()) // pull existing referrer list - rl, err := o.ReferrerList(ctx, rSubject) + rl, err := o.referrerList(ctx, rSubject) if err != nil { return err } @@ -169,5 +172,5 @@ func (o *OCIDir) referrerPut(ctx context.Context, r ref.Ref, m manifest.Manifest if err != nil { return err } - return o.ManifestPut(ctx, rlTag, rl.Manifest) + return o.manifestPut(ctx, rlTag, rl.Manifest) } diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/tag.go b/vendor/github.com/regclient/regclient/scheme/ocidir/tag.go index 0f7fef577..fd34a324a 100644 --- a/vendor/github.com/regclient/regclient/scheme/ocidir/tag.go +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/tag.go @@ -8,18 +8,25 @@ import ( "strings" "github.com/regclient/regclient/scheme" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" "github.com/regclient/regclient/types/ref" "github.com/regclient/regclient/types/tag" ) // TagDelete removes a tag from the repository func (o *OCIDir) TagDelete(ctx context.Context, r ref.Ref) error { + o.mu.Lock() + defer o.mu.Unlock() + return o.tagDelete(ctx, r) +} + +func (o *OCIDir) tagDelete(_ context.Context, r ref.Ref) error { if r.Tag == "" { - return types.ErrMissingTag + return errs.ErrMissingTag } // get index - index, err := o.readIndex(r) + index, err := o.readIndex(r, true) if err != nil { return fmt.Errorf("failed to read index: %w", err) } @@ -32,10 +39,10 @@ func (o *OCIDir) TagDelete(ctx context.Context, r ref.Ref) error { } } if !changed { - return fmt.Errorf("failed deleting %s: %w", r.CommonName(), types.ErrNotFound) + return fmt.Errorf("failed deleting %s: %w", r.CommonName(), errs.ErrNotFound) } // push manifest back out - err = o.writeIndex(r, index) + err = o.writeIndex(r, index, true) if err != nil { return fmt.Errorf("failed to write index: %w", err) } @@ -46,7 +53,7 @@ func (o *OCIDir) TagDelete(ctx context.Context, r ref.Ref) error { // TagList returns a list of tags from the repository func (o *OCIDir) TagList(ctx context.Context, r ref.Ref, opts ...scheme.TagOpts) (*tag.List, error) { // get index - index, err := o.readIndex(r) + index, err := o.readIndex(r, false) if err != nil { return nil, err } @@ -77,7 +84,8 @@ func (o *OCIDir) TagList(ctx context.Context, r ref.Ref, opts ...scheme.TagOpts) t, err := tag.New( tag.WithRaw(ib), tag.WithRef(r), - tag.WithMT(types.MediaTypeOCI1ManifestList), + tag.WithMT(mediatype.OCI1ManifestList), + tag.WithLayoutIndex(index), tag.WithTags(tl), ) if err != nil { diff --git a/vendor/github.com/regclient/regclient/scheme/reg/blob.go b/vendor/github.com/regclient/regclient/scheme/reg/blob.go index 976750a86..c9d2e7bd3 100644 --- a/vendor/github.com/regclient/regclient/scheme/reg/blob.go +++ b/vendor/github.com/regclient/regclient/scheme/reg/blob.go @@ -16,15 +16,21 @@ import ( _ "crypto/sha512" "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "github.com/regclient/regclient/internal/reghttp" - "github.com/regclient/regclient/types" "github.com/regclient/regclient/types/blob" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" "github.com/regclient/regclient/types/ref" - "github.com/sirupsen/logrus" +) + +var ( + zeroDig = 
digest.Canonical.FromBytes([]byte{}) ) // BlobDelete removes a blob from the repository -func (reg *Reg) BlobDelete(ctx context.Context, r ref.Ref, d types.Descriptor) error { +func (reg *Reg) BlobDelete(ctx context.Context, r ref.Ref, d descriptor.Descriptor) error { req := ®http.Req{ Host: r.Registry, APIs: map[string]reghttp.ReqAPI{ @@ -46,7 +52,7 @@ func (reg *Reg) BlobDelete(ctx context.Context, r ref.Ref, d types.Descriptor) e } // BlobGet retrieves a blob from the repository, returning a blob reader -func (reg *Reg) BlobGet(ctx context.Context, r ref.Ref, d types.Descriptor) (blob.Reader, error) { +func (reg *Reg) BlobGet(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { // build/send request req := ®http.Req{ Host: r.Registry, @@ -94,7 +100,7 @@ func (reg *Reg) BlobGet(ctx context.Context, r ref.Ref, d types.Descriptor) (blo b := blob.NewReader( blob.WithRef(r), blob.WithReader(resp), - blob.WithDesc(types.Descriptor{ + blob.WithDesc(descriptor.Descriptor{ Digest: d.Digest, }), blob.WithResp(resp.HTTPResponse()), @@ -103,7 +109,7 @@ func (reg *Reg) BlobGet(ctx context.Context, r ref.Ref, d types.Descriptor) (blo } // BlobHead is used to verify if a blob exists and is accessible -func (reg *Reg) BlobHead(ctx context.Context, r ref.Ref, d types.Descriptor) (blob.Reader, error) { +func (reg *Reg) BlobHead(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { // build/send request req := ®http.Req{ Host: r.Registry, @@ -151,7 +157,7 @@ func (reg *Reg) BlobHead(ctx context.Context, r ref.Ref, d types.Descriptor) (bl b := blob.NewReader( blob.WithRef(r), - blob.WithDesc(types.Descriptor{ + blob.WithDesc(descriptor.Descriptor{ Digest: d.Digest, }), blob.WithResp(resp.HTTPResponse()), @@ -160,34 +166,34 @@ func (reg *Reg) BlobHead(ctx context.Context, r ref.Ref, d types.Descriptor) (bl } // BlobMount attempts to perform a server side copy/mount of the blob between repositories -func (reg *Reg) BlobMount(ctx context.Context, rSrc ref.Ref, rTgt ref.Ref, d types.Descriptor) error { - _, uuid, err := reg.blobMount(ctx, rTgt, d, rSrc) +func (reg *Reg) BlobMount(ctx context.Context, rSrc ref.Ref, rTgt ref.Ref, d descriptor.Descriptor) error { + putURL, _, err := reg.blobMount(ctx, rTgt, d, rSrc) // if mount fails and returns an upload location, cancel that upload if err != nil { - reg.blobUploadCancel(ctx, rTgt, uuid) + _ = reg.blobUploadCancel(ctx, rTgt, putURL) } return err } // BlobPut uploads a blob to a repository. +// Descriptor is optional, leave size and digest to zero value if unknown. +// Reader must also be an [io.Seeker] to support chunked upload fallback. +// // This will attempt an anonymous blob mount first which some registries may support. // It will then try doing a full put of the blob without chunking (most widely supported). // If the full put fails, it will fall back to a chunked upload (useful for flaky networks). 
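The upload strategy in the BlobPut comment above can be reduced to a small standalone sketch; putFull and putChunked here are illustrative stand-ins for the registry requests, not regclient functions:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// putBlob tries a single-request upload first and, when that fails, rewinds
// the reader and falls back to a chunked upload.
func putBlob(rdr io.Reader, putFull, putChunked func(io.Reader) error) error {
	if err := putFull(rdr); err == nil {
		return nil
	}
	// the chunked fallback must restart from byte 0, so the reader needs Seek
	seeker, ok := rdr.(io.ReadSeeker)
	if !ok {
		return errors.New("cannot retry upload: reader is not seekable")
	}
	if _, err := seeker.Seek(0, io.SeekStart); err != nil {
		return err
	}
	return putChunked(seeker)
}

func main() {
	err := putBlob(strings.NewReader("blob data"),
		func(io.Reader) error { return errors.New("full put refused") },
		func(r io.Reader) error { b, _ := io.ReadAll(r); fmt.Println("chunked:", string(b)); return nil },
	)
	fmt.Println("err:", err)
}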
-func (reg *Reg) BlobPut(ctx context.Context, r ref.Ref, d types.Descriptor, rdr io.Reader) (types.Descriptor, error) { +func (reg *Reg) BlobPut(ctx context.Context, r ref.Ref, d descriptor.Descriptor, rdr io.Reader) (descriptor.Descriptor, error) { var putURL *url.URL var err error - // defaults for content-type and length - if d.Size == 0 { - d.Size = -1 - } + validDesc := (d.Digest != "" && d.Size > 0) || (d.Size == 0 && d.Digest == zeroDig) // attempt an anonymous blob mount - if d.Digest != "" && d.Size > 0 { + if validDesc { putURL, _, err = reg.blobMount(ctx, r, d, ref.Ref{}) if err == nil { return d, nil } - if err != types.ErrMountReturnedLocation { + if err != errs.ErrMountReturnedLocation { putURL = nil } } @@ -198,9 +204,8 @@ func (reg *Reg) BlobPut(ctx context.Context, r ref.Ref, d types.Descriptor, rdr return d, err } } - // send upload as one-chunk - tryPut := bool(d.Digest != "" && d.Size > 0) + tryPut := validDesc if tryPut { host := reg.hostGet(r.Registry) maxPut := host.BlobMax @@ -219,16 +224,21 @@ func (reg *Reg) BlobPut(ctx context.Context, r ref.Ref, d types.Descriptor, rdr // on failure, attempt to seek back to start to perform a chunked upload rdrSeek, ok := rdr.(io.ReadSeeker) if !ok { + _ = reg.blobUploadCancel(ctx, r, putURL) return d, err } offset, errR := rdrSeek.Seek(0, io.SeekStart) if errR != nil || offset != 0 { + _ = reg.blobUploadCancel(ctx, r, putURL) return d, err } } - // send a chunked upload if full upload not possible or too large - return reg.blobPutUploadChunked(ctx, r, putURL, rdr) + d, err = reg.blobPutUploadChunked(ctx, r, d, putURL, rdr) + if err != nil { + _ = reg.blobUploadCancel(ctx, r, putURL) + } + return d, err } func (reg *Reg) blobGetUploadURL(ctx context.Context, r ref.Ref) (*url.URL, error) { @@ -280,7 +290,7 @@ func (reg *Reg) blobGetUploadURL(ctx context.Context, r ref.Ref) (*url.URL, erro // Extract the location into a new putURL based on whether it's relative, fqdn with a scheme, or without a scheme. 
location := resp.HTTPResponse().Header.Get("Location") if location == "" { - return nil, fmt.Errorf("failed to send blob post, ref %s: %w", r.CommonName(), types.ErrMissingLocation) + return nil, fmt.Errorf("failed to send blob post, ref %s: %w", r.CommonName(), errs.ErrMissingLocation) } reg.log.WithFields(logrus.Fields{ "location": location, @@ -298,7 +308,7 @@ func (reg *Reg) blobGetUploadURL(ctx context.Context, r ref.Ref) (*url.URL, erro return putURL, nil } -func (reg *Reg) blobMount(ctx context.Context, rTgt ref.Ref, d types.Descriptor, rSrc ref.Ref) (*url.URL, string, error) { +func (reg *Reg) blobMount(ctx context.Context, rTgt ref.Ref, d descriptor.Descriptor, rSrc ref.Ref) (*url.URL, string, error) { // build/send request query := url.Values{} query.Set("mount", d.Digest.String()) @@ -369,14 +379,14 @@ func (reg *Reg) blobMount(ctx context.Context, rTgt ref.Ref, d types.Descriptor, "err": err, }).Warn("Mount location header failed to parse") } else { - return putURL, uuid, types.ErrMountReturnedLocation + return putURL, uuid, errs.ErrMountReturnedLocation } } // all other responses unhandled return nil, "", fmt.Errorf("failed to mount blob, digest %s, ref %s: %w", d.Digest.String(), rTgt.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) } -func (reg *Reg) blobPutUploadFull(ctx context.Context, r ref.Ref, d types.Descriptor, putURL *url.URL, rdr io.Reader) error { +func (reg *Reg) blobPutUploadFull(ctx context.Context, r ref.Ref, d descriptor.Descriptor, putURL *url.URL, rdr io.Reader) error { // append digest to request to use the monolithic upload option if putURL.RawQuery != "" { putURL.RawQuery = putURL.RawQuery + "&digest=" + url.QueryEscape(d.Digest.String()) @@ -387,20 +397,24 @@ func (reg *Reg) blobPutUploadFull(ctx context.Context, r ref.Ref, d types.Descri // make a reader function for the blob readOnce := false bodyFunc := func() (io.ReadCloser, error) { - // if reader is reused, + // handle attempt to reuse blob reader (e.g. 
on a connection retry or fallback) if readOnce { rdrSeek, ok := rdr.(io.ReadSeeker) if !ok { - return nil, fmt.Errorf("unable to reuse reader") + return nil, fmt.Errorf("blob source is not a seeker%.0w", errs.ErrNotRetryable) } _, err := rdrSeek.Seek(0, io.SeekStart) if err != nil { - return nil, fmt.Errorf("unable to reuse reader") + return nil, fmt.Errorf("seek on blob source failed: %w%.0w", err, errs.ErrNotRetryable) } } readOnce = true return io.NopCloser(rdr), nil } + // special case for the empty blob + if d.Size == 0 && d.Digest == zeroDig { + bodyFunc = nil + } // build/send request header := http.Header{ @@ -432,7 +446,7 @@ func (reg *Reg) blobPutUploadFull(ctx context.Context, r ref.Ref, d types.Descri return nil } -func (reg *Reg) blobPutUploadChunked(ctx context.Context, r ref.Ref, putURL *url.URL, rdr io.Reader) (types.Descriptor, error) { +func (reg *Reg) blobPutUploadChunked(ctx context.Context, r ref.Ref, d descriptor.Descriptor, putURL *url.URL, rdr io.Reader) (descriptor.Descriptor, error) { host := reg.hostGet(r.Registry) bufSize := host.BlobChunk if bufSize <= 0 { @@ -476,7 +490,7 @@ func (reg *Reg) blobPutUploadChunked(ctx context.Context, r ref.Ref, putURL *url if err == io.EOF || err == io.ErrUnexpectedEOF { finalChunk = true } else if err != nil { - return types.Descriptor{}, fmt.Errorf("failed to send blob chunk, ref %s: %w", r.CommonName(), err) + return d, fmt.Errorf("failed to send blob chunk, ref %s: %w", r.CommonName(), err) } // update length on partial read if chunkSize != len(bufBytes) { @@ -492,7 +506,7 @@ func (reg *Reg) blobPutUploadChunked(ctx context.Context, r ref.Ref, putURL *url bufChange = true } if chunkSize > 0 && chunkStart != bufStart { - return types.Descriptor{}, fmt.Errorf("chunkStart (%d) != bufStart (%d)", chunkStart, bufStart) + return d, fmt.Errorf("chunkStart (%d) != bufStart (%d)", chunkStart, bufStart) } if bufChange { // need to recreate the reader on a change to the slice length, @@ -521,10 +535,13 @@ func (reg *Reg) blobPutUploadChunked(ctx context.Context, r ref.Ref, putURL *url NoMirrors: true, } resp, err := reg.reghttp.Do(ctx, req) - if err != nil && !errors.Is(err, types.ErrHTTPStatus) && !errors.Is(err, types.ErrNotFound) { - return types.Descriptor{}, fmt.Errorf("failed to send blob (chunk), ref %s: http do: %w", r.CommonName(), err) + if err != nil && !errors.Is(err, errs.ErrHTTPStatus) && !errors.Is(err, errs.ErrNotFound) { + return d, fmt.Errorf("failed to send blob (chunk), ref %s: http do: %w", r.CommonName(), err) + } + err = resp.Close() + if err != nil { + return d, fmt.Errorf("failed to close request: %w", err) } - resp.Close() httpResp := resp.HTTPResponse() // distribution-spec is 202, AWS ECR returns a 201 and rejects the put if resp.HTTPResponse().StatusCode == 201 { @@ -547,7 +564,7 @@ func (reg *Reg) blobPutUploadChunked(ctx context.Context, r ref.Ref, putURL *url retryCur++ statusResp, statusErr := reg.blobUploadStatus(ctx, r, &chunkURL) if retryCur > retryLimit || statusErr != nil { - return types.Descriptor{}, fmt.Errorf("failed to send blob (chunk), ref %s: http status: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + return d, fmt.Errorf("failed to send blob (chunk), ref %s: http status: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) } httpResp = statusResp } else { @@ -570,7 +587,7 @@ func (reg *Reg) blobPutUploadChunked(ctx context.Context, r ref.Ref, putURL *url prevURL := httpResp.Request.URL parseURL, err := prevURL.Parse(location) if err != nil { - 
return types.Descriptor{}, fmt.Errorf("failed to send blob (parse next chunk location), ref %s: %w", r.CommonName(), err) + return d, fmt.Errorf("failed to send blob (parse next chunk location), ref %s: %w", r.CommonName(), err) } chunkURL = *parseURL } @@ -578,14 +595,22 @@ func (reg *Reg) blobPutUploadChunked(ctx context.Context, r ref.Ref, putURL *url } // compute digest - d := digester.Digest() + dOut := digester.Digest() + if d.Digest != "" && dOut != d.Digest { + return d, fmt.Errorf("%w, expected %s, computed %s", errs.ErrDigestMismatch, d.Digest.String(), dOut.String()) + } + if d.Size != 0 && chunkStart != d.Size { + return d, fmt.Errorf("blob content size does not match descriptor, expected %d, received %d%.0w", d.Size, chunkStart, errs.ErrMismatch) + } + d.Digest = dOut + d.Size = chunkStart // send the final put // append digest to request to use the monolithic upload option if chunkURL.RawQuery != "" { - chunkURL.RawQuery = chunkURL.RawQuery + "&digest=" + url.QueryEscape(d.String()) + chunkURL.RawQuery = chunkURL.RawQuery + "&digest=" + url.QueryEscape(dOut.String()) } else { - chunkURL.RawQuery = "digest=" + url.QueryEscape(d.String()) + chunkURL.RawQuery = "digest=" + url.QueryEscape(dOut.String()) } header := http.Header{ @@ -606,21 +631,21 @@ func (reg *Reg) blobPutUploadChunked(ctx context.Context, r ref.Ref, putURL *url } resp, err := reg.reghttp.Do(ctx, req) if err != nil { - return types.Descriptor{}, fmt.Errorf("failed to send blob (chunk digest), digest %s, ref %s: %w", d, r.CommonName(), err) + return d, fmt.Errorf("failed to send blob (chunk digest), digest %s, ref %s: %w", dOut, r.CommonName(), err) } defer resp.Close() // 201 follows distribution-spec, 204 is listed as possible in the Docker registry spec if resp.HTTPResponse().StatusCode != 201 && resp.HTTPResponse().StatusCode != 204 { - return types.Descriptor{}, fmt.Errorf("failed to send blob (chunk digest), digest %s, ref %s: %w", d, r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + return d, fmt.Errorf("failed to send blob (chunk digest), digest %s, ref %s: %w", dOut, r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) } - return types.Descriptor{Digest: d, Size: chunkStart}, nil + return d, nil } -// TODO: just take a putURL rather than the uuid and call a delete on that url -func (reg *Reg) blobUploadCancel(ctx context.Context, r ref.Ref, uuid string) error { - if uuid == "" { - return fmt.Errorf("failed to cancel upload %s: uuid undefined", r.CommonName()) +// blobUploadCancel stops an upload, releasing resources on the server. 
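The chunked upload above computes the blob digest while streaming rather than buffering the whole blob; the same pattern in isolation, using the opencontainers go-digest module this patch already vendors:

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	src := strings.NewReader(`{"example":"blob"}`)
	digester := digest.Canonical.Digester()
	// TeeReader copies every byte read from src into the running hash,
	// so the digest is known as soon as the stream is drained.
	n, err := io.Copy(io.Discard, io.TeeReader(src, digester.Hash()))
	if err != nil {
		panic(err)
	}
	fmt.Println(n, digester.Digest()) // size and sha256:... digest
}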
+func (reg *Reg) blobUploadCancel(ctx context.Context, r ref.Ref, putURL *url.URL) error { + if putURL == nil { + return fmt.Errorf("failed to cancel upload %s: url undefined", r.CommonName()) } req := ®http.Req{ Host: r.Registry, @@ -629,7 +654,7 @@ func (reg *Reg) blobUploadCancel(ctx context.Context, r ref.Ref, uuid string) er "": { Method: "DELETE", Repository: r.Repository, - Path: "blobs/uploads/" + uuid, + DirectURL: putURL, }, }, } @@ -659,7 +684,7 @@ func (reg *Reg) blobUploadStatus(ctx context.Context, r ref.Ref, putURL *url.URL } resp, err := reg.reghttp.Do(ctx, req) if err != nil { - return nil, fmt.Errorf("failed to get upload status: %v", err) + return nil, fmt.Errorf("failed to get upload status: %w", err) } defer resp.Close() if resp.HTTPResponse().StatusCode != 204 { diff --git a/vendor/github.com/regclient/regclient/scheme/reg/manifest.go b/vendor/github.com/regclient/regclient/scheme/reg/manifest.go index a59d831f4..b649365b2 100644 --- a/vendor/github.com/regclient/regclient/scheme/reg/manifest.go +++ b/vendor/github.com/regclient/regclient/scheme/reg/manifest.go @@ -6,21 +6,24 @@ import ( "fmt" "io" "net/http" + "strconv" + "github.com/sirupsen/logrus" + + "github.com/regclient/regclient/internal/limitread" "github.com/regclient/regclient/internal/reghttp" - "github.com/regclient/regclient/internal/wraperr" "github.com/regclient/regclient/scheme" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/errs" "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/mediatype" "github.com/regclient/regclient/types/ref" - "github.com/sirupsen/logrus" ) // ManifestDelete removes a manifest by reference (digest) from a registry. // This will implicitly delete all tags pointing to that manifest. 
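ManifestDelete below relies on the %.0w formatting trick this patch uses throughout: a sentinel error is wrapped so errors.Is can match it, but the zero precision keeps the sentinel's text out of the printed message. A minimal demonstration:

package main

import (
	"errors"
	"fmt"
)

var errMissingDigest = errors.New("digest missing")

func main() {
	// %.0w wraps the sentinel but prints zero characters of it
	err := fmt.Errorf("digest required to delete manifest, reference %s%.0w",
		"example.com/repo:v1", errMissingDigest)
	fmt.Println(err)                              // no "digest missing" suffix
	fmt.Println(errors.Is(err, errMissingDigest)) // true
}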
func (reg *Reg) ManifestDelete(ctx context.Context, r ref.Ref, opts ...scheme.ManifestOpts) error { if r.Digest == "" { - return wraperr.New(fmt.Errorf("digest required to delete manifest, reference %s", r.CommonName()), types.ErrMissingDigest) + return fmt.Errorf("digest required to delete manifest, reference %s%.0w", r.CommonName(), errs.ErrMissingDigest) } mc := scheme.ManifestConfig{} @@ -41,12 +44,14 @@ func (reg *Reg) ManifestDelete(ctx context.Context, r ref.Ref, opts ...scheme.Ma if err == nil && sDesc != nil && sDesc.MediaType != "" && sDesc.Size > 0 { // attempt to delete the referrer, but ignore if the referrer entry wasn't found err = reg.referrerDelete(ctx, r, mc.Manifest) - if err != nil && !errors.Is(err, types.ErrNotFound) { + if err != nil && !errors.Is(err, errs.ErrNotFound) { return err } } } } + rCache := r.SetDigest(r.Digest) + reg.cacheMan.Delete(rCache) // build/send request req := ®http.Req{ @@ -76,22 +81,27 @@ func (reg *Reg) ManifestDelete(ctx context.Context, r ref.Ref, opts ...scheme.Ma func (reg *Reg) ManifestGet(ctx context.Context, r ref.Ref) (manifest.Manifest, error) { var tagOrDigest string if r.Digest != "" { + rCache := r.SetDigest(r.Digest) + if m, err := reg.cacheMan.Get(rCache); err == nil { + return m, nil + } tagOrDigest = r.Digest } else if r.Tag != "" { tagOrDigest = r.Tag } else { - return nil, wraperr.New(fmt.Errorf("reference missing tag and digest: %s", r.CommonName()), types.ErrMissingTagOrDigest) + return nil, fmt.Errorf("reference missing tag and digest: %s%.0w", r.CommonName(), errs.ErrMissingTagOrDigest) } // build/send request headers := http.Header{ "Accept": []string{ - types.MediaTypeDocker1Manifest, - types.MediaTypeDocker1ManifestSigned, - types.MediaTypeDocker2Manifest, - types.MediaTypeDocker2ManifestList, - types.MediaTypeOCI1Manifest, - types.MediaTypeOCI1ManifestList, + mediatype.OCI1ManifestList, + mediatype.OCI1Manifest, + mediatype.Docker2ManifestList, + mediatype.Docker2Manifest, + mediatype.Docker1ManifestSigned, + mediatype.Docker1Manifest, + mediatype.OCI1Artifact, }, } req := ®http.Req{ @@ -114,17 +124,33 @@ func (reg *Reg) ManifestGet(ctx context.Context, r ref.Ref) (manifest.Manifest, return nil, fmt.Errorf("failed to get manifest %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) } + // limit length + size, _ := strconv.Atoi(resp.HTTPResponse().Header.Get("Content-Length")) + if size > 0 && reg.manifestMaxPull > 0 && int64(size) > reg.manifestMaxPull { + return nil, fmt.Errorf("manifest too large, received %d, limit %d: %s%.0w", size, reg.manifestMaxPull, r.CommonName(), errs.ErrSizeLimitExceeded) + } + rdr := &limitread.LimitRead{ + Reader: resp, + Limit: reg.manifestMaxPull, + } + // read manifest - rawBody, err := io.ReadAll(resp) + rawBody, err := io.ReadAll(rdr) if err != nil { return nil, fmt.Errorf("error reading manifest for %s: %w", r.CommonName(), err) } - return manifest.New( + m, err := manifest.New( manifest.WithRef(r), manifest.WithHeader(resp.HTTPResponse().Header), manifest.WithRaw(rawBody), ) + if err != nil { + return nil, err + } + rCache := r.SetDigest(m.GetDescriptor().Digest.String()) + reg.cacheMan.Set(rCache, m) + return m, nil } // ManifestHead returns metadata on the manifest from the registry @@ -132,22 +158,27 @@ func (reg *Reg) ManifestHead(ctx context.Context, r ref.Ref) (manifest.Manifest, // build the request var tagOrDigest string if r.Digest != "" { + rCache := r.SetDigest(r.Digest) + if m, err := reg.cacheMan.Get(rCache); err == nil { + return m, nil + } 
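// The digest form of a reference is immutable, so a cache hit can be served
// without a registry round trip; tag lookups fall through to the registry,
// since the manifest a tag resolves to can change at any time.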
tagOrDigest = r.Digest } else if r.Tag != "" { tagOrDigest = r.Tag } else { - return nil, wraperr.New(fmt.Errorf("reference missing tag and digest: %s", r.CommonName()), types.ErrMissingTagOrDigest) + return nil, fmt.Errorf("reference missing tag and digest: %s%.0w", r.CommonName(), errs.ErrMissingTagOrDigest) } // build/send request headers := http.Header{ "Accept": []string{ - types.MediaTypeDocker1Manifest, - types.MediaTypeDocker1ManifestSigned, - types.MediaTypeDocker2Manifest, - types.MediaTypeDocker2ManifestList, - types.MediaTypeOCI1Manifest, - types.MediaTypeOCI1ManifestList, + mediatype.OCI1ManifestList, + mediatype.OCI1Manifest, + mediatype.Docker2ManifestList, + mediatype.Docker2Manifest, + mediatype.Docker1ManifestSigned, + mediatype.Docker1Manifest, + mediatype.OCI1Artifact, }, } req := ®http.Req{ @@ -187,7 +218,7 @@ func (reg *Reg) ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, reg.log.WithFields(logrus.Fields{ "ref": r.Reference, }).Warn("Manifest put requires a tag") - return types.ErrMissingTag + return errs.ErrMissingTag } // create the request body @@ -200,6 +231,11 @@ func (reg *Reg) ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, return fmt.Errorf("error marshalling manifest for %s: %w", r.CommonName(), err) } + // limit length + if reg.manifestMaxPush > 0 && int64(len(mj)) > reg.manifestMaxPush { + return fmt.Errorf("manifest too large, calculated %d, limit %d: %s%.0w", len(mj), reg.manifestMaxPush, r.CommonName(), errs.ErrSizeLimitExceeded) + } + // build/send request headers := http.Header{ "Content-Type": []string{manifest.GetMediaType(m)}, @@ -222,21 +258,31 @@ func (reg *Reg) ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, if err != nil { return fmt.Errorf("failed to put manifest %s: %w", r.CommonName(), err) } - defer resp.Close() + err = resp.Close() + if err != nil { + return fmt.Errorf("failed to close request: %w", err) + } if resp.HTTPResponse().StatusCode != 201 { return fmt.Errorf("failed to put manifest %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) } + rCache := r.SetDigest(m.GetDescriptor().Digest.String()) + reg.cacheMan.Set(rCache, m) + // update referrers if defined on this manifest if mr, ok := m.(manifest.Subjecter); ok { mDesc, err := mr.GetSubject() if err != nil { return err } - if mDesc != nil && mDesc.MediaType != "" && mDesc.Size > 0 { - err = reg.referrerPut(ctx, r, m) - if err != nil { - return err + if mDesc != nil && mDesc.MediaType != "" && mDesc.Size > 0 && mDesc.Digest.String() != "" { + rSubj := r.SetDigest(mDesc.Digest.String()) + reg.cacheRL.Delete(rSubj) + if mDesc.Digest.String() != resp.HTTPResponse().Header.Get(OCISubjectHeader) { + err = reg.referrerPut(ctx, r, m) + if err != nil { + return err + } } } } diff --git a/vendor/github.com/regclient/regclient/scheme/reg/ping.go b/vendor/github.com/regclient/regclient/scheme/reg/ping.go new file mode 100644 index 000000000..4a89548c9 --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/reg/ping.go @@ -0,0 +1,41 @@ +package reg + +import ( + "context" + "fmt" + + "github.com/regclient/regclient/internal/reghttp" + "github.com/regclient/regclient/types/ping" + "github.com/regclient/regclient/types/ref" +) + +// Ping queries the /v2/ API of the registry to verify connectivity and access. 
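Outside of regclient, the /v2/ check that Ping performs is a plain GET; per the distribution spec, a 200 (or a 401 carrying a WWW-Authenticate challenge) indicates a conformant registry. A rough sketch against Docker Hub:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Get("https://registry-1.docker.io/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// Docker Hub answers 401 here: the API is reachable but wants auth.
	fmt.Println(resp.StatusCode, resp.Header.Get("WWW-Authenticate"))
}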
+func (reg *Reg) Ping(ctx context.Context, r ref.Ref) (ping.Result, error) { + ret := ping.Result{} + req := ®http.Req{ + Host: r.Registry, + NoMirrors: true, + APIs: map[string]reghttp.ReqAPI{ + "": { + Method: "GET", + Path: "", + }, + }, + } + + resp, err := reg.reghttp.Do(ctx, req) + if resp != nil && resp.HTTPResponse() != nil { + ret.Header = resp.HTTPResponse().Header + } + if err != nil { + return ret, fmt.Errorf("failed to ping registry %s: %w", r.Registry, err) + } + defer resp.Close() + + if resp.HTTPResponse().StatusCode != 200 { + return ret, fmt.Errorf("failed to ping registry %s: %w", + r.Registry, reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + return ret, nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/reg/referrer.go b/vendor/github.com/regclient/regclient/scheme/reg/referrer.go index aa12ae48d..160330870 100644 --- a/vendor/github.com/regclient/regclient/scheme/reg/referrer.go +++ b/vendor/github.com/regclient/regclient/scheme/reg/referrer.go @@ -10,14 +10,17 @@ import ( "github.com/regclient/regclient/internal/httplink" "github.com/regclient/regclient/internal/reghttp" "github.com/regclient/regclient/scheme" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/errs" "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/mediatype" v1 "github.com/regclient/regclient/types/oci/v1" "github.com/regclient/regclient/types/platform" "github.com/regclient/regclient/types/ref" "github.com/regclient/regclient/types/referrer" ) +const OCISubjectHeader = "OCI-Subject" + // ReferrerList returns a list of referrers to a given reference func (reg *Reg) ReferrerList(ctx context.Context, r ref.Ref, opts ...scheme.ReferrerOpts) (referrer.ReferrerList, error) { config := scheme.ReferrerConfig{} @@ -61,20 +64,49 @@ func (reg *Reg) ReferrerList(ctx context.Context, r ref.Ref, opts ...scheme.Refe r.Digest = m.GetDescriptor().Digest.String() } - // attempt to call the referrer API - rl, err := reg.referrerListAPI(ctx, r, config) - if err != nil { - rl, err = reg.referrerListTag(ctx, r) + found := false + // try cache + rCache := r.SetDigest(r.Digest) + rl, err := reg.cacheRL.Get(rCache) + if err == nil { + found = true + } + // try referrers API + if !found { + referrerEnabled, ok := reg.featureGet("referrer", r.Registry, r.Repository) + if !ok || referrerEnabled { + // attempt to call the referrer API + rl, err = reg.referrerListByAPI(ctx, r, config) + if !ok { + // save the referrer API state + reg.featureSet("referrer", r.Registry, r.Repository, err == nil) + } + if err == nil { + if config.MatchOpt.ArtifactType == "" { + // only cache if successful and artifactType is not filtered + reg.cacheRL.Set(rCache, rl) + } + found = true + } + } + } + // fall back to tag + if !found { + rl, err = reg.referrerListByTag(ctx, r) + if err == nil { + reg.cacheRL.Set(rCache, rl) + } } if err != nil { return rl, err } - rl = scheme.ReferrerFilter(config, rl) + // apply client side filters and return result + rl = scheme.ReferrerFilter(config, rl) return rl, nil } -func (reg *Reg) referrerListAPI(ctx context.Context, r ref.Ref, config scheme.ReferrerConfig) (referrer.ReferrerList, error) { +func (reg *Reg) referrerListByAPI(ctx context.Context, r ref.Ref, config scheme.ReferrerConfig) (referrer.ReferrerList, error) { rl := referrer.ReferrerList{ Subject: r, Tags: []string{}, @@ -83,7 +115,7 @@ func (reg *Reg) referrerListAPI(ctx context.Context, r ref.Ref, config scheme.Re var resp reghttp.Resp // loop for paging for 
{ - rlAdd, respNext, err := reg.referrerListAPIReq(ctx, r, config, link) + rlAdd, respNext, err := reg.referrerListByAPIPage(ctx, r, config, link) if err != nil { return rl, err } @@ -118,14 +150,14 @@ func (reg *Reg) referrerListAPI(ctx context.Context, r ref.Ref, config scheme.Re return rl, nil } -func (reg *Reg) referrerListAPIReq(ctx context.Context, r ref.Ref, config scheme.ReferrerConfig, link *url.URL) (referrer.ReferrerList, reghttp.Resp, error) { +func (reg *Reg) referrerListByAPIPage(ctx context.Context, r ref.Ref, config scheme.ReferrerConfig, link *url.URL) (referrer.ReferrerList, reghttp.Resp, error) { rl := referrer.ReferrerList{ Subject: r, Tags: []string{}, } query := url.Values{} - if config.FilterArtifactType != "" { - query.Set("artifactType", config.FilterArtifactType) + if config.MatchOpt.ArtifactType != "" { + query.Set("artifactType", config.MatchOpt.ArtifactType) } req := ®http.Req{ Host: r.Registry, @@ -172,7 +204,7 @@ func (reg *Reg) referrerListAPIReq(ctx context.Context, r ref.Ref, config scheme } ociML, ok := m.GetOrig().(v1.Index) if !ok { - return rl, nil, fmt.Errorf("unexpected manifest type for referrers: %s, %w", m.GetDescriptor().MediaType, types.ErrUnsupportedMediaType) + return rl, nil, fmt.Errorf("unexpected manifest type for referrers: %s, %w", m.GetDescriptor().MediaType, errs.ErrUnsupportedMediaType) } rl.Manifest = m rl.Descriptors = ociML.Manifests @@ -181,7 +213,7 @@ func (reg *Reg) referrerListAPIReq(ctx context.Context, r ref.Ref, config scheme return rl, resp, nil } -func (reg *Reg) referrerListTag(ctx context.Context, r ref.Ref) (referrer.ReferrerList, error) { +func (reg *Reg) referrerListByTag(ctx context.Context, r ref.Ref) (referrer.ReferrerList, error) { rl := referrer.ReferrerList{ Subject: r, Tags: []string{}, @@ -192,11 +224,11 @@ func (reg *Reg) referrerListTag(ctx context.Context, r ref.Ref) (referrer.Referr } m, err := reg.ManifestGet(ctx, rlTag) if err != nil { - if errors.Is(err, types.ErrNotFound) { + if errors.Is(err, errs.ErrNotFound) { // empty list, initialize a new manifest rl.Manifest, err = manifest.New(manifest.WithOrig(v1.Index{ Versioned: v1.IndexSchemaVersion, - MediaType: types.MediaTypeOCI1ManifestList, + MediaType: mediatype.OCI1ManifestList, })) if err != nil { return rl, err @@ -222,7 +254,7 @@ func (reg *Reg) referrerDelete(ctx context.Context, r ref.Ref, m manifest.Manife // get subject field mSubject, ok := m.(manifest.Subjecter) if !ok { - return fmt.Errorf("manifest does not support the subject field: %w", types.ErrUnsupportedMediaType) + return fmt.Errorf("manifest does not support the subject field: %w", errs.ErrUnsupportedMediaType) } subject, err := mSubject.GetSubject() if err != nil { @@ -230,12 +262,12 @@ func (reg *Reg) referrerDelete(ctx context.Context, r ref.Ref, m manifest.Manife } // validate/set subject descriptor if subject == nil || subject.MediaType == "" || subject.Digest == "" || subject.Size <= 0 { - return fmt.Errorf("refers is not set%.0w", types.ErrNotFound) + return fmt.Errorf("refers is not set%.0w", errs.ErrNotFound) } - rSubject := r - rSubject.Tag = "" - rSubject.Digest = subject.Digest.String() + // remove from cache + rSubject := r.SetDigest(subject.Digest.String()) + reg.cacheRL.Delete(rSubject) // if referrer API is available, nothing to do, return if reg.referrerPing(ctx, rSubject) { @@ -243,7 +275,7 @@ func (reg *Reg) referrerDelete(ctx context.Context, r ref.Ref, m manifest.Manife } // fallback to using tag schema for refers - rl, err := reg.referrerListTag(ctx, rSubject) + 
rl, err := reg.referrerListByTag(ctx, rSubject) if err != nil { return err } @@ -271,7 +303,7 @@ func (reg *Reg) referrerPut(ctx context.Context, r ref.Ref, m manifest.Manifest) // get subject field mSubject, ok := m.(manifest.Subjecter) if !ok { - return fmt.Errorf("manifest does not support the subject field: %w", types.ErrUnsupportedMediaType) + return fmt.Errorf("manifest does not support the subject field: %w", errs.ErrUnsupportedMediaType) } subject, err := mSubject.GetSubject() if err != nil { @@ -279,20 +311,15 @@ func (reg *Reg) referrerPut(ctx context.Context, r ref.Ref, m manifest.Manifest) } // validate/set subject descriptor if subject == nil || subject.MediaType == "" || subject.Digest == "" || subject.Size <= 0 { - return fmt.Errorf("subject is not set%.0w", types.ErrNotFound) - } - - rSubject := r - rSubject.Tag = "" - rSubject.Digest = subject.Digest.String() - - // if referrer API is available, return - if reg.referrerPing(ctx, rSubject) { - return nil + return fmt.Errorf("subject is not set%.0w", errs.ErrNotFound) } + // lock to avoid internal race conditions between pulling and pushing tag + reg.muRefTag.Lock() + defer reg.muRefTag.Unlock() // fallback to using tag schema for refers - rl, err := reg.referrerListTag(ctx, rSubject) + rSubject := r.SetDigest(subject.Digest.String()) + rl, err := reg.referrerListByTag(ctx, rSubject) if err != nil { return err } @@ -300,16 +327,37 @@ func (reg *Reg) referrerPut(ctx context.Context, r ref.Ref, m manifest.Manifest) if err != nil { return err } + // ensure the referrer list does not have a subject itself (avoiding circular locks) + if ms, ok := rl.Manifest.(manifest.Subjecter); ok { + mDesc, err := ms.GetSubject() + if err != nil { + return err + } + if mDesc != nil && mDesc.MediaType != "" && mDesc.Size > 0 { + return fmt.Errorf("fallback referrers manifest should not have a subject: %s", rSubject.CommonName()) + } + } // push updated referrer list by tag rlTag, err := referrer.FallbackTag(rSubject) if err != nil { return err } - return reg.ManifestPut(ctx, rlTag, rl.Manifest) + if len(rl.Tags) == 0 { + rl.Tags = []string{rlTag.Tag} + } + err = reg.ManifestPut(ctx, rlTag, rl.Manifest) + if err == nil { + reg.cacheRL.Set(rSubject, rl) + } + return err } // referrerPing verifies the registry supports the referrers API func (reg *Reg) referrerPing(ctx context.Context, r ref.Ref) bool { + referrerEnabled, ok := reg.featureGet("referrer", r.Registry, r.Repository) + if ok { + return referrerEnabled + } req := ®http.Req{ Host: r.Registry, APIs: map[string]reghttp.ReqAPI{ @@ -322,8 +370,11 @@ func (reg *Reg) referrerPing(ctx context.Context, r ref.Ref) bool { } resp, err := reg.reghttp.Do(ctx, req) if err != nil { + reg.featureSet("referrer", r.Registry, r.Repository, false) return false } - resp.Close() - return resp.HTTPResponse().StatusCode == 200 + _ = resp.Close() + result := resp.HTTPResponse().StatusCode == 200 + reg.featureSet("referrer", r.Registry, r.Repository, result) + return result } diff --git a/vendor/github.com/regclient/regclient/scheme/reg/reg.go b/vendor/github.com/regclient/regclient/scheme/reg/reg.go index b7019f423..1a258509c 100644 --- a/vendor/github.com/regclient/regclient/scheme/reg/reg.go +++ b/vendor/github.com/regclient/regclient/scheme/reg/reg.go @@ -6,10 +6,15 @@ import ( "sync" "time" + "github.com/sirupsen/logrus" + "github.com/regclient/regclient/config" + "github.com/regclient/regclient/internal/cache" "github.com/regclient/regclient/internal/reghttp" - "github.com/regclient/regclient/scheme" - 
"github.com/sirupsen/logrus" + "github.com/regclient/regclient/internal/throttle" + "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/referrer" ) const ( @@ -21,32 +26,58 @@ const ( defaultBlobChunkLimit = 1024 * 1024 * 1024 // defaultBlobMax is disabled to support registries without chunked upload support defaultBlobMax = -1 + // defaultManifestMaxPull limits the largest manifest that will be pulled + defaultManifestMaxPull = 1024 * 1024 * 8 + // defaultManifestMaxPush limits the largest manifest that will be pushed + defaultManifestMaxPush = 1024 * 1024 * 4 ) // Reg is used for interacting with remote registry servers type Reg struct { - reghttp *reghttp.Client - reghttpOpts []reghttp.Opts - log *logrus.Logger - hosts map[string]*config.Host - blobChunkSize int64 - blobChunkLimit int64 - blobMaxPut int64 - mu sync.Mutex + reghttp *reghttp.Client + reghttpOpts []reghttp.Opts + log *logrus.Logger + hosts map[string]*config.Host + features map[featureKey]*featureVal + blobChunkSize int64 + blobChunkLimit int64 + blobMaxPut int64 + manifestMaxPull int64 + manifestMaxPush int64 + cacheMan *cache.Cache[ref.Ref, manifest.Manifest] + cacheRL *cache.Cache[ref.Ref, referrer.ReferrerList] + muHost sync.Mutex + muRefTag sync.Mutex } +type featureKey struct { + kind string + reg string + repo string +} +type featureVal struct { + enabled bool + expire time.Time +} + +var featureExpire = time.Minute * time.Duration(5) + // Opts provides options to access registries type Opts func(*Reg) // New returns a Reg pointer with any provided options func New(opts ...Opts) *Reg { r := Reg{ - reghttpOpts: []reghttp.Opts{}, - blobChunkSize: defaultBlobChunk, - blobChunkLimit: defaultBlobChunkLimit, - blobMaxPut: defaultBlobMax, - hosts: map[string]*config.Host{}, + reghttpOpts: []reghttp.Opts{}, + blobChunkSize: defaultBlobChunk, + blobChunkLimit: defaultBlobChunkLimit, + blobMaxPut: defaultBlobMax, + manifestMaxPull: defaultManifestMaxPull, + manifestMaxPush: defaultManifestMaxPush, + hosts: map[string]*config.Host{}, + features: map[featureKey]*featureVal{}, } + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithConfigHost(r.hostGet)) for _, opt := range opts { opt(&r) } @@ -54,20 +85,60 @@ func New(opts ...Opts) *Reg { return &r } -// Info is experimental and may be removed in the future -func (reg *Reg) Info() scheme.Info { - return scheme.Info{} +// Throttle is used to limit concurrency +func (reg *Reg) Throttle(r ref.Ref, put bool) []*throttle.Throttle { + tList := []*throttle.Throttle{} + host := reg.hostGet(r.Registry) + t := host.Throttle() + if t != nil { + tList = append(tList, t) + } + if !put { + for _, mirror := range host.Mirrors { + t := reg.hostGet(mirror).Throttle() + if t != nil { + tList = append(tList, t) + } + } + } + return tList } func (reg *Reg) hostGet(hostname string) *config.Host { - reg.mu.Lock() - defer reg.mu.Unlock() + reg.muHost.Lock() + defer reg.muHost.Unlock() if _, ok := reg.hosts[hostname]; !ok { - reg.hosts[hostname] = config.HostNewName(hostname) + newHost := config.HostNewName(hostname) + // check for normalized hostname + if newHost.Name != hostname { + hostname = newHost.Name + if h, ok := reg.hosts[hostname]; ok { + return h + } + } + reg.hosts[hostname] = newHost } return reg.hosts[hostname] } +// featureGet returns enabled and ok +func (reg *Reg) featureGet(kind, registry, repo string) (bool, bool) { + reg.muHost.Lock() + defer reg.muHost.Unlock() + if v, ok := 
reg.features[featureKey{kind: kind, reg: registry, repo: repo}]; ok { + if time.Now().Before(v.expire) { + return v.enabled, true + } + } + return false, false +} + +func (reg *Reg) featureSet(kind, registry, repo string, enabled bool) { + reg.muHost.Lock() + reg.features[featureKey{kind: kind, reg: registry, repo: repo}] = &featureVal{enabled: enabled, expire: time.Now().Add(featureExpire)} + reg.muHost.Unlock() +} + // WithBlobSize overrides default blob sizes func WithBlobSize(size, max int64) Opts { return func(r *Reg) { @@ -92,6 +163,16 @@ func WithBlobLimit(limit int64) Opts { } } +// WithCache defines a cache used for various requests +func WithCache(timeout time.Duration, count int) Opts { + return func(r *Reg) { + cm := cache.New[ref.Ref, manifest.Manifest](cache.WithAge(timeout), cache.WithCount(count)) + r.cacheMan = &cm + crl := cache.New[ref.Ref, referrer.ReferrerList](cache.WithAge(timeout), cache.WithCount(count)) + r.cacheRL = &crl + } +} + // WithCerts adds certificates func WithCerts(certs [][]byte) Opts { return func(r *Reg) { @@ -122,7 +203,6 @@ func WithConfigHosts(configHosts []*config.Host) Opts { } r.hosts[host.Name] = host } - r.reghttpOpts = append(r.reghttpOpts, reghttp.WithConfigHosts(configHosts)) } } @@ -148,6 +228,14 @@ func WithLog(log *logrus.Logger) Opts { } } +// WithManifestMax sets the push and pull limits for manifests +func WithManifestMax(push, pull int64) Opts { + return func(r *Reg) { + r.manifestMaxPush = push + r.manifestMaxPull = pull + } +} + // WithRetryLimit restricts the number of retries (defaults to 5) func WithRetryLimit(l int) Opts { return func(r *Reg) { diff --git a/vendor/github.com/regclient/regclient/scheme/reg/repo.go b/vendor/github.com/regclient/regclient/scheme/reg/repo.go index 9bc8f5c59..cea8f0ac1 100644 --- a/vendor/github.com/regclient/regclient/scheme/reg/repo.go +++ b/vendor/github.com/regclient/regclient/scheme/reg/repo.go @@ -8,10 +8,12 @@ import ( "net/url" "strconv" + "github.com/sirupsen/logrus" + "github.com/regclient/regclient/internal/reghttp" "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/mediatype" "github.com/regclient/regclient/types/repo" - "github.com/sirupsen/logrus" ) // RepoList returns a list of repositories on a registry @@ -63,7 +65,7 @@ func (reg *Reg) RepoList(ctx context.Context, hostname string, opts ...scheme.Re }).Warn("Failed to read repo list") return nil, fmt.Errorf("failed to read repo list for %s: %w", hostname, err) } - mt := resp.HTTPResponse().Header.Get("Content-Type") + mt := mediatype.Base(resp.HTTPResponse().Header.Get("Content-Type")) rl, err := repo.New( repo.WithMT(mt), repo.WithRaw(respBody), diff --git a/vendor/github.com/regclient/regclient/scheme/reg/tag.go b/vendor/github.com/regclient/regclient/scheme/reg/tag.go index 64c511145..fe9ac5a66 100644 --- a/vendor/github.com/regclient/regclient/scheme/reg/tag.go +++ b/vendor/github.com/regclient/regclient/scheme/reg/tag.go @@ -17,16 +17,20 @@ import ( _ "crypto/sha512" "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "github.com/regclient/regclient/internal/httplink" "github.com/regclient/regclient/internal/reghttp" "github.com/regclient/regclient/scheme" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/descriptor" "github.com/regclient/regclient/types/docker/schema2" + "github.com/regclient/regclient/types/errs" "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/mediatype" v1 
"github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/platform" "github.com/regclient/regclient/types/ref" "github.com/regclient/regclient/types/tag" - "github.com/sirupsen/logrus" ) // TagDelete removes a tag from a repository. @@ -35,7 +39,7 @@ import ( func (reg *Reg) TagDelete(ctx context.Context, r ref.Ref) error { var tempManifest manifest.Manifest if r.Tag == "" { - return types.ErrMissingTag + return errs.ErrMissingTag } // attempt to delete the tag directly, available in OCI distribution-spec, and Hub API @@ -68,7 +72,7 @@ func (reg *Reg) TagDelete(ctx context.Context, r ref.Ref) error { // lookup the current manifest media type curManifest, err := reg.ManifestHead(ctx, r) - if err != nil && errors.Is(err, types.ErrUnsupportedAPI) { + if err != nil && errors.Is(err, errs.ErrUnsupportedAPI) { curManifest, err = reg.ManifestGet(ctx, r) } if err != nil { @@ -86,19 +90,21 @@ func (reg *Reg) TagDelete(ctx context.Context, r ref.Ref) error { "delete-date": now.String(), }, }, - OS: "linux", - Architecture: "amd64", + Platform: platform.Platform{ + OS: "linux", + Architecture: "amd64", + }, History: []v1.History{ { Created: &now, CreatedBy: "# regclient", - Comment: "scratch blob", + Comment: "empty JSON blob", }, }, RootFS: v1.RootFS{ Type: "layers", DiffIDs: []digest.Digest{ - types.ScratchDigest, + descriptor.EmptyDigest, }, }, } @@ -116,20 +122,20 @@ func (reg *Reg) TagDelete(ctx context.Context, r ref.Ref) error { // create manifest with config, matching the original tag manifest type switch manifest.GetMediaType(curManifest) { - case types.MediaTypeOCI1Manifest, types.MediaTypeOCI1ManifestList: + case mediatype.OCI1Manifest, mediatype.OCI1ManifestList: tempManifest, err = manifest.New(manifest.WithOrig(v1.Manifest{ Versioned: v1.ManifestSchemaVersion, - MediaType: types.MediaTypeOCI1Manifest, - Config: types.Descriptor{ - MediaType: types.MediaTypeOCI1ImageConfig, + MediaType: mediatype.OCI1Manifest, + Config: descriptor.Descriptor{ + MediaType: mediatype.OCI1ImageConfig, Digest: confDigest, Size: int64(len(confB)), }, - Layers: []types.Descriptor{ + Layers: []descriptor.Descriptor{ { - MediaType: types.MediaTypeOCI1Layer, - Size: int64(len(types.ScratchData)), - Digest: types.ScratchDigest, + MediaType: mediatype.OCI1Layer, + Size: int64(len(descriptor.EmptyData)), + Digest: descriptor.EmptyDigest, }, }, })) @@ -139,16 +145,16 @@ func (reg *Reg) TagDelete(ctx context.Context, r ref.Ref) error { default: // default to the docker v2 schema tempManifest, err = manifest.New(manifest.WithOrig(schema2.Manifest{ Versioned: schema2.ManifestSchemaVersion, - Config: types.Descriptor{ - MediaType: types.MediaTypeDocker2ImageConfig, + Config: descriptor.Descriptor{ + MediaType: mediatype.Docker2ImageConfig, Digest: confDigest, Size: int64(len(confB)), }, - Layers: []types.Descriptor{ + Layers: []descriptor.Descriptor{ { - MediaType: types.MediaTypeDocker2LayerGzip, - Size: int64(len(types.ScratchData)), - Digest: types.ScratchDigest, + MediaType: mediatype.Docker2LayerGzip, + Size: int64(len(descriptor.EmptyData)), + Digest: descriptor.EmptyDigest, }, }, })) @@ -160,14 +166,14 @@ func (reg *Reg) TagDelete(ctx context.Context, r ref.Ref) error { "ref": r.Reference, }).Debug("Sending dummy manifest to replace tag") - // push scratch layer - _, err = reg.BlobPut(ctx, r, types.Descriptor{Digest: types.ScratchDigest, Size: int64(len(types.ScratchData))}, bytes.NewReader(types.ScratchData)) + // push empty layer + _, err = reg.BlobPut(ctx, r, 
descriptor.Descriptor{Digest: descriptor.EmptyDigest, Size: int64(len(descriptor.EmptyData))}, bytes.NewReader(descriptor.EmptyData)) if err != nil { return err } // push config - _, err = reg.BlobPut(ctx, r, types.Descriptor{Digest: confDigest, Size: int64(len(confB))}, bytes.NewReader(confB)) + _, err = reg.BlobPut(ctx, r, descriptor.Descriptor{Digest: confDigest, Size: int64(len(confB))}, bytes.NewReader(confB)) if err != nil { return fmt.Errorf("failed sending dummy config to delete %s: %w", r.CommonName(), err) } @@ -303,7 +309,7 @@ func (reg *Reg) tagListOCI(ctx context.Context, r ref.Ref, config scheme.TagConf return tl, nil } -func (reg *Reg) tagListLink(ctx context.Context, r ref.Ref, config scheme.TagConfig, link *url.URL) (*tag.List, error) { +func (reg *Reg) tagListLink(ctx context.Context, r ref.Ref, _ scheme.TagConfig, link *url.URL) (*tag.List, error) { headers := http.Header{ "Accept": []string{"application/json"}, } diff --git a/vendor/github.com/regclient/regclient/scheme/scheme.go b/vendor/github.com/regclient/regclient/scheme/scheme.go index e8cb832ad..f55c72786 100644 --- a/vendor/github.com/regclient/regclient/scheme/scheme.go +++ b/vendor/github.com/regclient/regclient/scheme/scheme.go @@ -1,209 +1,220 @@ -// Package scheme defines the interface for various reference schemes +// Package scheme defines the interface for various reference schemes. package scheme import ( "context" "io" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/internal/throttle" "github.com/regclient/regclient/types/blob" + "github.com/regclient/regclient/types/descriptor" "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/ping" "github.com/regclient/regclient/types/ref" "github.com/regclient/regclient/types/referrer" "github.com/regclient/regclient/types/tag" ) -// API is used to interface between different methods to store images +// API is used to interface between different methods to store images. type API interface { - // Info is experimental, do not use - Info() Info - - // BlobDelete removes a blob from the repository - BlobDelete(ctx context.Context, r ref.Ref, d types.Descriptor) error - // BlobGet retrieves a blob, returning a reader - BlobGet(ctx context.Context, r ref.Ref, d types.Descriptor) (blob.Reader, error) - // BlobHead verifies the existence of a blob, the reader contains the headers but no body to read - BlobHead(ctx context.Context, r ref.Ref, d types.Descriptor) (blob.Reader, error) - // BlobMount attempts to perform a server side copy of the blob - BlobMount(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d types.Descriptor) error - // BlobPut sends a blob to the repository, returns the digest and size when successful - BlobPut(ctx context.Context, r ref.Ref, d types.Descriptor, rdr io.Reader) (types.Descriptor, error) - - // ManifestDelete removes a manifest, including all tags that point to that manifest + // BlobDelete removes a blob from the repository. + BlobDelete(ctx context.Context, r ref.Ref, d descriptor.Descriptor) error + // BlobGet retrieves a blob, returning a reader. + BlobGet(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) + // BlobHead verifies the existence of a blob, the reader contains the headers but no body to read. + BlobHead(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) + // BlobMount attempts to perform a server side copy of the blob. 
+ BlobMount(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor) error + // BlobPut sends a blob to the repository, returns the digest and size when successful. + BlobPut(ctx context.Context, r ref.Ref, d descriptor.Descriptor, rdr io.Reader) (descriptor.Descriptor, error) + + // ManifestDelete removes a manifest, including all tags that point to that manifest. ManifestDelete(ctx context.Context, r ref.Ref, opts ...ManifestOpts) error - // ManifestGet retrieves a manifest from a repository + // ManifestGet retrieves a manifest from a repository. ManifestGet(ctx context.Context, r ref.Ref) (manifest.Manifest, error) - // ManifestHead gets metadata about the manifest (existence, digest, mediatype, size) + // ManifestHead gets metadata about the manifest (existence, digest, mediatype, size). ManifestHead(ctx context.Context, r ref.Ref) (manifest.Manifest, error) - // ManifestPut sends a manifest to the repository + // ManifestPut sends a manifest to the repository. ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, opts ...ManifestOpts) error - // ReferrerList returns a list of referrers to a given reference + // Ping verifies access to a registry or equivalent. + Ping(ctx context.Context, r ref.Ref) (ping.Result, error) + + // ReferrerList returns a list of referrers to a given reference. ReferrerList(ctx context.Context, r ref.Ref, opts ...ReferrerOpts) (referrer.ReferrerList, error) - // TagDelete removes a tag from the repository + // TagDelete removes a tag from the repository. TagDelete(ctx context.Context, r ref.Ref) error - // TagList returns a list of tags from the repository + // TagList returns a list of tags from the repository. TagList(ctx context.Context, r ref.Ref, opts ...TagOpts) (*tag.List, error) } -// Closer is used to check if a scheme implements the Close API +// Closer is used to check if a scheme implements the Close API. type Closer interface { Close(ctx context.Context, r ref.Ref) error } -// Info provides details on the scheme, this is experimental, do not use -type Info struct { - ManifestPushFirst bool +// GCLocker is used to indicate locking is available for GC management. +type GCLocker interface { + // GCLock a reference to prevent GC from triggering during a put, locks are not exclusive. + GCLock(r ref.Ref) + // GCUnlock a reference to allow GC (once all locks are released). + // The reference should be closed after this step and unlock should only be called once per each Lock call. + GCUnlock(r ref.Ref) +} + +// Throttler is used to indicate the scheme implements Throttle. +type Throttler interface { + Throttle(r ref.Ref, put bool) []*throttle.Throttle } -// ManifestConfig is used by schemes to import ManifestOpts +// ManifestConfig is used by schemes to import [ManifestOpts]. type ManifestConfig struct { CheckReferrers bool Child bool // used when pushing a child of a manifest list, skips indexing in ocidir Manifest manifest.Manifest } -// ManifestOpts is used to set options on manifest APIs +// ManifestOpts is used to set options on manifest APIs. type ManifestOpts func(*ManifestConfig) -// WithManifestCheckReferrers is used when deleting a manifest -// It indicates the manifest should be fetched and referrers should be deleted if defined +// WithManifestCheckReferrers is used when deleting a manifest. +// It indicates the manifest should be fetched and referrers should be deleted if defined. 
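GCLocker and Throttler above are optional capability interfaces: callers discover support with a type assertion instead of widening the core API. A compact illustration with made-up types (API, regScheme, and Throttle here are not regclient's definitions):

package main

import "fmt"

type API interface{ Name() string }

type Throttler interface{ Throttle() int }

type regScheme struct{}

func (regScheme) Name() string  { return "reg" }
func (regScheme) Throttle() int { return 3 }

func main() {
	var s API = regScheme{}
	if t, ok := s.(Throttler); ok { // optional capability detected at runtime
		fmt.Println(s.Name(), "throttle:", t.Throttle())
	}
}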
func WithManifestCheckReferrers() ManifestOpts { return func(config *ManifestConfig) { config.CheckReferrers = true } } -// WithManifestChild indicates the API call is on a child manifest -// This is used internally when copying multi-platform manifests -// This bypasses tracking of an untagged digest in ocidir which is needed for garbage collection +// WithManifestChild indicates the API call is on a child manifest. +// This is used internally when copying multi-platform manifests. +// This bypasses tracking of an untagged digest in ocidir which is needed for garbage collection. func WithManifestChild() ManifestOpts { return func(config *ManifestConfig) { config.Child = true } } -// WithManifest is used to pass the manifest to a method to avoid an extra GET request -// This is used on a delete to check for referrers +// WithManifest is used to pass the manifest to a method to avoid an extra GET request. +// This is used on a delete to check for referrers. func WithManifest(m manifest.Manifest) ManifestOpts { return func(mc *ManifestConfig) { mc.Manifest = m } } -// ReferrerConfig is used by schemes to import ReferrerOpts +// ReferrerConfig is used by schemes to import [ReferrerOpts]. type ReferrerConfig struct { - FilterArtifactType string - FilterAnnotation map[string]string - Platform string + MatchOpt descriptor.MatchOpt // filter/sort results + Platform string // get referrers for a specific platform } -// ReferrerOpts is used to set options on referrer APIs +// ReferrerOpts is used to set options on referrer APIs. type ReferrerOpts func(*ReferrerConfig) -// WithReferrerAT filters by a specific artifactType value +// WithReferrerMatchOpt filters results using [descriptor.MatchOpt]. +func WithReferrerMatchOpt(mo descriptor.MatchOpt) ReferrerOpts { + return func(config *ReferrerConfig) { + config.MatchOpt = mo + } +} + +// WithReferrerPlatform gets referrers for a single platform from a multi-platform manifest. +func WithReferrerPlatform(p string) ReferrerOpts { + return func(config *ReferrerConfig) { + config.Platform = p + } +} + +// WithReferrerAT filters by a specific artifactType value. +// +// Deprecated: replace with [WithReferrerMatchOpt]. func WithReferrerAT(at string) ReferrerOpts { return func(config *ReferrerConfig) { - config.FilterArtifactType = at + config.MatchOpt.ArtifactType = at } } -// WithReferrerAnnotations filters by a list of annotations, all of which must match +// WithReferrerAnnotations filters by a list of annotations, all of which must match. +// +// Deprecated: replace with [WithReferrerMatchOpt]. func WithReferrerAnnotations(annotations map[string]string) ReferrerOpts { return func(config *ReferrerConfig) { - if config.FilterAnnotation == nil { - config.FilterAnnotation = annotations + if config.MatchOpt.Annotations == nil { + config.MatchOpt.Annotations = annotations } else { for k, v := range annotations { - config.FilterAnnotation[k] = v + config.MatchOpt.Annotations[k] = v } } } } -// ReferrerFilter filters the referrer list according to the config +// WithReferrerSort orders the resulting referrers listing according to a specified annotation. +// +// Deprecated: replace with [WithReferrerMatchOpt]. +func WithReferrerSort(annotation string, desc bool) ReferrerOpts { + return func(config *ReferrerConfig) { + config.MatchOpt.SortAnnotation = annotation + config.MatchOpt.SortDesc = desc + } +} + +// ReferrerFilter filters the referrer list according to the config. 
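+// For example, the deprecated filter options above reduce to a single
+// MatchOpt; a sketch (the artifact type is illustrative):
+//
+//	rl = ReferrerFilter(ReferrerConfig{
+//		MatchOpt: descriptor.MatchOpt{ArtifactType: "application/vnd.example.sbom"},
+//	}, rl)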
func ReferrerFilter(config ReferrerConfig, rlIn referrer.ReferrerList) referrer.ReferrerList { - rlOut := referrer.ReferrerList{ + return referrer.ReferrerList{ Subject: rlIn.Subject, Manifest: rlIn.Manifest, Annotations: rlIn.Annotations, Tags: rlIn.Tags, - } - rlOut.Descriptors = make([]types.Descriptor, len(rlIn.Descriptors)) - copy(rlOut.Descriptors, rlIn.Descriptors) - if config.FilterArtifactType != "" && len(rlOut.Descriptors) > 0 { - for i := len(rlOut.Descriptors) - 1; i >= 0; i-- { - if rlOut.Descriptors[i].ArtifactType != config.FilterArtifactType { - rlOut.Descriptors = append(rlOut.Descriptors[:i], rlOut.Descriptors[i+1:]...) - } - } - } - for k, v := range config.FilterAnnotation { - if len(rlOut.Descriptors) > 0 { - for i := len(rlOut.Descriptors) - 1; i >= 0; i-- { - if rlOut.Descriptors[i].Annotations == nil { - rlOut.Descriptors = append(rlOut.Descriptors[:i], rlOut.Descriptors[i+1:]...) - } else if rlVal, ok := rlOut.Descriptors[i].Annotations[k]; !ok || v != "" && rlVal != v { - rlOut.Descriptors = append(rlOut.Descriptors[:i], rlOut.Descriptors[i+1:]...) - } - } - } - } - return rlOut -} - -// WithReferrerPlatform gets referrers for a single platform from a multi-platform manifest -func WithReferrerPlatform(platform string) ReferrerOpts { - return func(config *ReferrerConfig) { - config.Platform = platform + Descriptors: descriptor.DescriptorListFilter(rlIn.Descriptors, config.MatchOpt), } } -// RepoConfig is used by schemes to import RepoOpts +// RepoConfig is used by schemes to import [RepoOpts]. type RepoConfig struct { Limit int Last string } -// RepoOpts is used to set options on repo APIs +// RepoOpts is used to set options on repo APIs. type RepoOpts func(*RepoConfig) -// WithRepoLimit passes a maximum number of repositories to return to the repository list API -// Registries may ignore this +// WithRepoLimit passes a maximum number of repositories to return to the repository list API. +// Registries may ignore this. func WithRepoLimit(l int) RepoOpts { return func(config *RepoConfig) { config.Limit = l } } -// WithRepoLast passes the last received repository for requesting the next batch of repositories -// Registries may ignore this +// WithRepoLast passes the last received repository for requesting the next batch of repositories. +// Registries may ignore this. func WithRepoLast(l string) RepoOpts { return func(config *RepoConfig) { config.Last = l } } -// TagConfig is used by schemes to import TagOpts +// TagConfig is used by schemes to import [TagOpts]. type TagConfig struct { Limit int Last string } -// TagOpts is used to set options on tag APIs +// TagOpts is used to set options on tag APIs. type TagOpts func(*TagConfig) -// WithTagLimit passes a maximum number of tags to return to the tag list API -// Registries may ignore this +// WithTagLimit passes a maximum number of tags to return to the tag list API. +// Registries may ignore this. func WithTagLimit(limit int) TagOpts { return func(t *TagConfig) { t.Limit = limit } } -// WithTagLast passes the last received tag for requesting the next batch of tags -// Registries may ignore this +// WithTagLast passes the last received tag for requesting the next batch of tags. +// Registries may ignore this. 
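+// The limit and last options combine for paged listings; a sketch (s is
+// assumed to implement the API interface, and the page size is arbitrary):
+//
+//	tl, err := s.TagList(ctx, r, WithTagLimit(100), WithTagLast(last))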
func WithTagLast(last string) TagOpts { return func(t *TagConfig) { t.Last = last diff --git a/vendor/github.com/regclient/regclient/tag.go b/vendor/github.com/regclient/regclient/tag.go index 043e365d5..6b522804b 100644 --- a/vendor/github.com/regclient/regclient/tag.go +++ b/vendor/github.com/regclient/regclient/tag.go @@ -2,8 +2,10 @@ package regclient import ( "context" + "fmt" "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/errs" "github.com/regclient/regclient/types/ref" "github.com/regclient/regclient/types/tag" ) @@ -15,6 +17,9 @@ import ( // 2. Push that manifest to the tag. // 3. Delete the digest for that new manifest that is only used by that tag. func (rc *RegClient) TagDelete(ctx context.Context, r ref.Ref) error { + if !r.IsSet() { + return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } schemeAPI, err := rc.schemeGet(r.Scheme) if err != nil { return err @@ -24,6 +29,9 @@ func (rc *RegClient) TagDelete(ctx context.Context, r ref.Ref) error { // TagList returns a tag list from a repository func (rc *RegClient) TagList(ctx context.Context, r ref.Ref, opts ...scheme.TagOpts) (*tag.List, error) { + if !r.IsSetRepo() { + return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } schemeAPI, err := rc.schemeGet(r.Scheme) if err != nil { return nil, err diff --git a/vendor/github.com/regclient/regclient/types/blob/blob.go b/vendor/github.com/regclient/regclient/types/blob/blob.go index 563239edb..86d215cba 100644 --- a/vendor/github.com/regclient/regclient/types/blob/blob.go +++ b/vendor/github.com/regclient/regclient/types/blob/blob.go @@ -1,23 +1,44 @@ -// Package blob is the underlying type for pushing and pulling blobs +// Package blob is the underlying type for pushing and pulling blobs. package blob import ( "io" "net/http" - "github.com/regclient/regclient/types" + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/types/descriptor" v1 "github.com/regclient/regclient/types/oci/v1" "github.com/regclient/regclient/types/ref" ) -// Blob interface is used for returning blobs +// Blob interface is used for returning blobs. type Blob interface { - Common + // GetDescriptor returns the descriptor associated with the blob. + GetDescriptor() descriptor.Descriptor + // RawBody returns the raw content of the blob. RawBody() ([]byte, error) + // RawHeaders returns the headers received from the registry. + RawHeaders() http.Header + // Response returns the response associated with the blob. + Response() *http.Response + + // Digest returns the provided or calculated digest of the blob. + // + // Deprecated: Digest should be replaced by GetDescriptor().Digest. + Digest() digest.Digest + // Length returns the provided or calculated length of the blob. + // + // Deprecated: Length should be replaced by GetDescriptor().Size. + Length() int64 + // MediaType returns the Content-Type header received from the registry. + // + // Deprecated: MediaType should be replaced by GetDescriptor().MediaType. + MediaType() string } type blobConfig struct { - desc types.Descriptor + desc descriptor.Descriptor header http.Header image *v1.Image r ref.Ref @@ -26,51 +47,52 @@ type blobConfig struct { rawBody []byte } +// Opts is used for options to create a new blob. type Opts func(*blobConfig) -// WithDesc specifies the descriptor associated with the blob -func WithDesc(d types.Descriptor) Opts { +// WithDesc specifies the descriptor associated with the blob. 
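+// A sketch of wrapping known content (d and rdr are assumed to be defined):
+//
+//	br := NewReader(WithDesc(d), WithReader(rdr))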
+func WithDesc(d descriptor.Descriptor) Opts { return func(bc *blobConfig) { bc.desc = d } } -// WithHeader defines the headers received when pulling a blob +// WithHeader defines the headers received when pulling a blob. func WithHeader(header http.Header) Opts { return func(bc *blobConfig) { bc.header = header } } -// WithImage provides the OCI Image config needed for config blobs +// WithImage provides the OCI Image config needed for config blobs. func WithImage(image v1.Image) Opts { return func(bc *blobConfig) { bc.image = &image } } -// WithRawBody defines the raw blob contents for OCIConfig +// WithRawBody defines the raw blob contents for OCIConfig. func WithRawBody(raw []byte) Opts { return func(bc *blobConfig) { bc.rawBody = raw } } -// WithReader defines the reader for a new blob +// WithReader defines the reader for a new blob. func WithReader(rc io.Reader) Opts { return func(bc *blobConfig) { bc.rdr = rc } } -// WithRef specifies the reference where the blob was pulled from +// WithRef specifies the reference where the blob was pulled from. func WithRef(r ref.Ref) Opts { return func(bc *blobConfig) { bc.r = r } } -// WithResp includes the http response, which is used to extract the headers and reader +// WithResp includes the http response, which is used to extract the headers and reader. func WithResp(resp *http.Response) Opts { return func(bc *blobConfig) { bc.resp = resp diff --git a/vendor/github.com/regclient/regclient/types/blob/common.go b/vendor/github.com/regclient/regclient/types/blob/common.go index b60d108fd..44e652ad6 100644 --- a/vendor/github.com/regclient/regclient/types/blob/common.go +++ b/vendor/github.com/regclient/regclient/types/blob/common.go @@ -8,58 +8,55 @@ import ( _ "crypto/sha512" "github.com/opencontainers/go-digest" - "github.com/regclient/regclient/types" + + "github.com/regclient/regclient/types/descriptor" "github.com/regclient/regclient/types/ref" ) -// Common interface is provided by all Blob implementations -type Common interface { - GetDescriptor() types.Descriptor - Response() *http.Response - RawHeaders() http.Header - - // Deprecated: Digest should be replaced by GetDescriptor().Digest - Digest() digest.Digest - // Deprecated: Length should be replaced by GetDescriptor().Size - Length() int64 - // Deprecated: MediaType should be replaced by GetDescriptor().MediaType - MediaType() string -} +// Common was previously an interface. A type alias is provided for upgrades. +type Common = *BCommon -type common struct { +// BCommon is a common struct for all blobs which includes various shared methods. +type BCommon struct { r ref.Ref - desc types.Descriptor + desc descriptor.Descriptor blobSet bool rawHeader http.Header resp *http.Response } -// GetDescriptor returns the descriptor associated with the blob -func (b *common) GetDescriptor() types.Descriptor { - return b.desc +// GetDescriptor returns the descriptor associated with the blob. +func (c *BCommon) GetDescriptor() descriptor.Descriptor { + return c.desc } -// Digest returns the provided or calculated digest of the blob -func (b *common) Digest() digest.Digest { - return b.desc.Digest +// Digest returns the provided or calculated digest of the blob. +// +// Deprecated: Digest should be replaced by GetDescriptor().Digest, see [GetDescriptor]. 
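+//
+// Migration is mechanical; for an existing blob b:
+//
+//	dig := b.GetDescriptor().Digest // replaces the deprecated b.Digest()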
+func (c *BCommon) Digest() digest.Digest { + return c.desc.Digest } -// Length returns the provided or calculated length of the blob -func (b *common) Length() int64 { - return b.desc.Size +// Length returns the provided or calculated length of the blob. +// +// Deprecated: Length should be replaced by GetDescriptor().Size, see [GetDescriptor]. +func (c *BCommon) Length() int64 { + return c.desc.Size } -// MediaType returns the Content-Type header received from the registry -func (b *common) MediaType() string { - return b.desc.MediaType +// MediaType returns the Content-Type header received from the registry. +// +// Deprecated: MediaType should be replaced by GetDescriptor().MediaType, see [GetDescriptor]. +func (c *BCommon) MediaType() string { + return c.desc.MediaType } -// RawHeaders returns the headers received from the registry -func (b *common) RawHeaders() http.Header { - return b.rawHeader +// RawHeaders returns the headers received from the registry. +func (c *BCommon) RawHeaders() http.Header { + return c.rawHeader } -// Response returns the response associated with the blob -func (b *common) Response() *http.Response { - return b.resp +// Response returns the response associated with the blob. +func (c *BCommon) Response() *http.Response { + return c.resp } diff --git a/vendor/github.com/regclient/regclient/types/blob/ociconfig.go b/vendor/github.com/regclient/regclient/types/blob/ociconfig.go index 21b1362af..a3db153d9 100644 --- a/vendor/github.com/regclient/regclient/types/blob/ociconfig.go +++ b/vendor/github.com/regclient/regclient/types/blob/ociconfig.go @@ -9,27 +9,24 @@ import ( _ "crypto/sha512" "github.com/opencontainers/go-digest" - "github.com/regclient/regclient/types" + + "github.com/regclient/regclient/types/mediatype" v1 "github.com/regclient/regclient/types/oci/v1" ) -// OCIConfig wraps an OCI Config struct extracted from a Blob -type OCIConfig interface { - Blob - GetConfig() v1.Image - SetConfig(v1.Image) -} +// OCIConfig was previously an interface. A type alias is provided for upgrading. +type OCIConfig = *BOCIConfig -// ociConfig includes an OCI Config struct extracted from a Blob -// Image is included as an anonymous field to facilitate json and templating calls transparently -type ociConfig struct { - common +// BOCIConfig includes an OCI Image Config struct that may be extracted from or pushed to a blob. +type BOCIConfig struct { + BCommon rawBody []byte - v1.Image + image v1.Image } -// NewOCIConfig creates a new BlobOCIConfig from an OCI Image -func NewOCIConfig(opts ...Opts) OCIConfig { +// NewOCIConfig creates a new BOCIConfig. +// When created from an existing blob, a BOCIConfig will be created using BReader.ToOCIConfig(). 
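+// A sketch of building a config blob from raw JSON (raw is assumed to hold a
+// marshalled OCI image config):
+//
+//	oc := NewOCIConfig(WithRawBody(raw))
+//	conf := oc.GetConfig()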
+func NewOCIConfig(opts ...Opts) *BOCIConfig { bc := blobConfig{} for _, opt := range opts { opt(&bc) @@ -53,52 +50,79 @@ func NewOCIConfig(opts ...Opts) OCIConfig { bc.desc.Digest = digest.FromBytes(bc.rawBody) bc.desc.Size = int64(len(bc.rawBody)) if bc.desc.MediaType == "" { - bc.desc.MediaType = types.MediaTypeOCI1ImageConfig + bc.desc.MediaType = mediatype.OCI1ImageConfig } } - - c := common{ - desc: bc.desc, - r: bc.r, - rawHeader: bc.header, - resp: bc.resp, - } - b := ociConfig{ - common: c, + b := BOCIConfig{ + BCommon: BCommon{ + desc: bc.desc, + r: bc.r, + rawHeader: bc.header, + resp: bc.resp, + }, rawBody: bc.rawBody, } if bc.image != nil { - b.Image = *bc.image + b.image = *bc.image b.blobSet = true } return &b } -// GetConfig returns OCI config -func (b *ociConfig) GetConfig() v1.Image { - return b.Image +// GetConfig returns OCI config. +func (oc *BOCIConfig) GetConfig() v1.Image { + return oc.image } -// RawBody returns the original body from the request -func (b *ociConfig) RawBody() ([]byte, error) { +// RawBody returns the original body from the request. +func (oc *BOCIConfig) RawBody() ([]byte, error) { var err error - if !b.blobSet { + if !oc.blobSet { return []byte{}, fmt.Errorf("Blob is not defined") } - if len(b.rawBody) == 0 { - b.rawBody, err = json.Marshal(b.Image) + if len(oc.rawBody) == 0 { + oc.rawBody, err = json.Marshal(oc.image) + } + return oc.rawBody, err +} + +// SetConfig updates the config, including raw body and descriptor. +func (oc *BOCIConfig) SetConfig(image v1.Image) { + oc.image = image + oc.rawBody, _ = json.Marshal(oc.image) + if oc.desc.MediaType == "" { + oc.desc.MediaType = mediatype.OCI1ImageConfig } - return b.rawBody, err + oc.desc.Digest = digest.FromBytes(oc.rawBody) + oc.desc.Size = int64(len(oc.rawBody)) + oc.blobSet = true } -// SetConfig updates the config, including raw body and descriptor -func (b *ociConfig) SetConfig(c v1.Image) { - b.Image = c - b.rawBody, _ = json.Marshal(b.Image) - if b.desc.MediaType == "" { - b.desc.MediaType = types.MediaTypeOCI1ImageConfig +// MarshalJSON passes through the marshalling to the underlying image if rawBody is not available. +func (oc *BOCIConfig) MarshalJSON() ([]byte, error) { + if !oc.blobSet { + return []byte{}, fmt.Errorf("Blob is not defined") + } + if len(oc.rawBody) > 0 { + return oc.rawBody, nil + } + return json.Marshal(oc.image) +} + +// UnmarshalJSON extracts json content and populates the content. 
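+// This lets a config round-trip through encoding/json; a sketch:
+//
+//	oc := NewOCIConfig()
+//	err := json.Unmarshal(raw, oc)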
+func (oc *BOCIConfig) UnmarshalJSON(data []byte) error { + image := v1.Image{} + err := json.Unmarshal(data, &image) + if err != nil { + return err + } + oc.rawBody = make([]byte, len(data)) + copy(oc.rawBody, data) + if oc.desc.MediaType == "" { + oc.desc.MediaType = mediatype.OCI1ImageConfig } - b.desc.Digest = digest.FromBytes(b.rawBody) - b.desc.Size = int64(len(b.rawBody)) - b.blobSet = true + oc.desc.Digest = digest.FromBytes(oc.rawBody) + oc.desc.Size = int64(len(oc.rawBody)) + oc.blobSet = true + return nil } diff --git a/vendor/github.com/regclient/regclient/types/blob/reader.go b/vendor/github.com/regclient/regclient/types/blob/reader.go index f569f4f9c..51985e16d 100644 --- a/vendor/github.com/regclient/regclient/types/blob/reader.go +++ b/vendor/github.com/regclient/regclient/types/blob/reader.go @@ -10,27 +10,26 @@ import ( _ "crypto/sha512" "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/internal/limitread" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" ) -// Reader is an unprocessed Blob with an available ReadCloser for reading the Blob -type Reader interface { - Blob - io.ReadCloser - ToOCIConfig() (OCIConfig, error) - ToTarReader() (TarReader, error) -} +// Reader was previously an interface. A type alias is provided for upgrading. +type Reader = *BReader -// reader is the internal struct implementing BlobReader -type reader struct { - common +// BReader is used to read blobs. +type BReader struct { + BCommon readBytes int64 reader io.Reader origRdr io.Reader digester digest.Digester } -// NewReader creates a new reader -func NewReader(opts ...Opts) Reader { +// NewReader creates a new BReader. +func NewReader(opts ...Opts) *BReader { bc := blobConfig{} for _, opt := range opts { opt(&bc) @@ -47,7 +46,7 @@ func NewReader(opts ...Opts) Reader { if bc.header != nil { // extract fields from header if descriptor not passed if bc.desc.MediaType == "" { - bc.desc.MediaType = bc.header.Get("Content-Type") + bc.desc.MediaType = mediatype.Base(bc.header.Get("Content-Type")) } if bc.desc.Size == 0 { cl, _ := strconv.Atoi(bc.header.Get("Content-Length")) @@ -57,125 +56,145 @@ func NewReader(opts ...Opts) Reader { bc.desc.Digest, _ = digest.Parse(bc.header.Get("Docker-Content-Digest")) } } - c := common{ - r: bc.r, - desc: bc.desc, - rawHeader: bc.header, - resp: bc.resp, - } - br := reader{ - common: c, + br := BReader{ + BCommon: BCommon{ + r: bc.r, + desc: bc.desc, + rawHeader: bc.header, + resp: bc.resp, + }, origRdr: bc.rdr, } if bc.rdr != nil { br.blobSet = true br.digester = digest.Canonical.Digester() - br.reader = io.TeeReader(bc.rdr, br.digester.Hash()) + rdr := bc.rdr + if br.desc.Size > 0 { + rdr = &limitread.LimitRead{ + Reader: rdr, + Limit: br.desc.Size, + } + } + br.reader = io.TeeReader(rdr, br.digester.Hash()) } return &br } -func (b *reader) Close() error { - if b.origRdr == nil { +// Close attempts to close the reader and populates/validates the digest. +func (r *BReader) Close() error { + if r.origRdr == nil { return nil } // attempt to close if available in original reader - bc, ok := b.origRdr.(io.Closer) + bc, ok := r.origRdr.(io.Closer) if !ok { return nil } return bc.Close() } -// RawBody returns the original body from the request -func (b *reader) RawBody() ([]byte, error) { - return io.ReadAll(b) +// RawBody returns the original body from the request. 
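+// Reading consumes the underlying stream, so RawBody may only be called once;
+// the digest and size from the descriptor are validated when EOF is reached
+// (see Read below). A sketch:
+//
+//	body, err := br.RawBody()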
+func (r *BReader) RawBody() ([]byte, error) {
+	return io.ReadAll(r)
 }

-// Read passes through the read operation while computing the digest and tracking the size
-func (b *reader) Read(p []byte) (int, error) {
-	if b.reader == nil {
+// Read passes through the read operation while computing the digest and tracking the size.
+func (r *BReader) Read(p []byte) (int, error) {
+	if r.reader == nil {
 		return 0, fmt.Errorf("blob has no reader: %w", io.ErrUnexpectedEOF)
 	}
-	size, err := b.reader.Read(p)
-	b.readBytes = b.readBytes + int64(size)
+	size, err := r.reader.Read(p)
+	r.readBytes = r.readBytes + int64(size)
 	if err == io.EOF {
 		// check/save size
-		if b.desc.Size == 0 {
-			b.desc.Size = b.readBytes
-		} else if b.readBytes != b.desc.Size {
-			err = fmt.Errorf("expected size mismatch [expected %d, received %d]: %w", b.desc.Size, b.readBytes, err)
+		if r.desc.Size == 0 {
+			r.desc.Size = r.readBytes
+		} else if r.readBytes < r.desc.Size {
+			err = fmt.Errorf("%w [expected %d, received %d]: %w", errs.ErrShortRead, r.desc.Size, r.readBytes, err)
+		} else if r.readBytes > r.desc.Size {
+			err = fmt.Errorf("%w [expected %d, received %d]: %w", errs.ErrSizeLimitExceeded, r.desc.Size, r.readBytes, err)
 		}
 		// check/save digest
-		if b.desc.Digest == "" {
-			b.desc.Digest = b.digester.Digest()
-		} else if b.desc.Digest != b.digester.Digest() {
-			err = fmt.Errorf("expected digest mismatch [expected %s, calculated %s]: %w", b.desc.Digest.String(), b.digester.Digest().String(), err)
+		if r.desc.Digest == "" {
+			r.desc.Digest = r.digester.Digest()
+		} else if r.desc.Digest != r.digester.Digest() {
+			err = fmt.Errorf("%w [expected %s, calculated %s]: %w", errs.ErrDigestMismatch, r.desc.Digest.String(), r.digester.Digest().String(), err)
 		}
 	}
 	return size, err
 }

 // Seek passes through the seek operation, resetting or invalidating the digest
-func (b *reader) Seek(offset int64, whence int) (int64, error) {
+func (r *BReader) Seek(offset int64, whence int) (int64, error) {
 	if offset == 0 && whence == io.SeekCurrent {
-		return b.readBytes, nil
+		return r.readBytes, nil
 	}
 	// cannot do an arbitrary seek and still digest without a lot more complication
 	if offset != 0 || whence != io.SeekStart {
-		return b.readBytes, fmt.Errorf("unable to seek to arbitrary position")
+		return r.readBytes, fmt.Errorf("unable to seek to arbitrary position")
 	}
-	rdrSeek, ok := b.origRdr.(io.Seeker)
+	rdrSeek, ok := r.origRdr.(io.Seeker)
 	if !ok {
-		return b.readBytes, fmt.Errorf("Seek unsupported")
+		return r.readBytes, fmt.Errorf("Seek unsupported")
 	}
 	o, err := rdrSeek.Seek(offset, whence)
 	if err != nil || o != 0 {
-		return b.readBytes, err
+		return r.readBytes, err
 	}
 	// reset internal offset and digest calculation
+	rdr := r.origRdr
+	if r.desc.Size > 0 {
+		rdr = &limitread.LimitRead{
+			Reader: rdr,
+			Limit:  r.desc.Size,
+		}
+	}
 	digester := digest.Canonical.Digester()
-	digestRdr := io.TeeReader(b.origRdr, digester.Hash())
-	b.digester = digester
-	b.readBytes = 0
-	b.reader = digestRdr
+	r.reader = io.TeeReader(rdr, digester.Hash())
+	r.digester = digester
+	r.readBytes = 0
 	return 0, nil
 }

-// ToOCIConfig converts a blobReader to a BlobOCIConfig
-func (b *reader) ToOCIConfig() (OCIConfig, error) {
-	if !b.blobSet {
+// ToOCIConfig converts a BReader to a BOCIConfig.
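+// A typical flow pulls the config blob and converts it; a sketch (br is
+// assumed to be a BReader positioned at the start of an image config blob):
+//
+//	oc, err := br.ToOCIConfig()
+//	conf := oc.GetConfig()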
+func (r *BReader) ToOCIConfig() (*BOCIConfig, error) {
+	if !r.blobSet {
 		return nil, fmt.Errorf("blob is not defined")
 	}
-	if b.readBytes != 0 {
+	if r.readBytes != 0 {
 		return nil, fmt.Errorf("unable to convert after read has been performed")
 	}
-	blobBody, err := io.ReadAll(b)
+	blobBody, err := io.ReadAll(r)
+	errC := r.Close()
 	if err != nil {
-		return nil, fmt.Errorf("error reading image config for %s: %w", b.r.CommonName(), err)
+		return nil, fmt.Errorf("error reading image config for %s: %w", r.r.CommonName(), err)
+	}
+	if errC != nil {
+		return nil, fmt.Errorf("error closing blob reader: %w", errC)
 	}
 	return NewOCIConfig(
-		WithDesc(b.desc),
-		WithHeader(b.rawHeader),
+		WithDesc(r.desc),
+		WithHeader(r.rawHeader),
 		WithRawBody(blobBody),
-		WithRef(b.r),
-		WithResp(b.resp),
+		WithRef(r.r),
+		WithResp(r.resp),
 	), nil
 }

-func (b *reader) ToTarReader() (TarReader, error) {
-	if !b.blobSet {
+// ToTarReader converts a BReader to a BTarReader.
+func (r *BReader) ToTarReader() (*BTarReader, error) {
+	if !r.blobSet {
 		return nil, fmt.Errorf("blob is not defined")
 	}
-	if b.readBytes != 0 {
+	if r.readBytes != 0 {
 		return nil, fmt.Errorf("unable to convert after read has been performed")
 	}
 	return NewTarReader(
-		WithDesc(b.desc),
-		WithHeader(b.rawHeader),
-		WithRef(b.r),
-		WithResp(b.resp),
-		WithReader(b.reader),
+		WithDesc(r.desc),
+		WithHeader(r.rawHeader),
+		WithRef(r.r),
+		WithResp(r.resp),
+		WithReader(r.reader),
 	), nil
 }
diff --git a/vendor/github.com/regclient/regclient/types/blob/tar.go b/vendor/github.com/regclient/regclient/types/blob/tar.go
index 43ace3062..9fcd47a46 100644
--- a/vendor/github.com/regclient/regclient/types/blob/tar.go
+++ b/vendor/github.com/regclient/regclient/types/blob/tar.go
@@ -9,73 +9,66 @@ import (
 	"strings"

 	"github.com/opencontainers/go-digest"
+
+	"github.com/regclient/regclient/internal/limitread"
 	"github.com/regclient/regclient/pkg/archive"
-	"github.com/regclient/regclient/types"
+	"github.com/regclient/regclient/types/errs"
 )

-// TarReader reads or writes to a blob with tar contents and optional compression
-type TarReader interface {
-	Blob
-	io.Closer
-	GetTarReader() (*tar.Reader, error)
-	ReadFile(filename string) (*tar.Header, io.Reader, error)
-}
+// TarReader was previously an interface. A type alias is provided for upgrading.
+type TarReader = *BTarReader

-type tarReader struct {
-	common
+// BTarReader is used to read individual files from an image layer.
+type BTarReader struct {
+	BCommon
 	origRdr  io.Reader
 	reader   io.Reader
 	digester digest.Digester
 	tr       *tar.Reader
 }

-// NewTarReader creates a TarReader
-func NewTarReader(opts ...Opts) TarReader {
+// NewTarReader creates a BTarReader.
+// Typically a BTarReader will be created using BReader.ToTarReader().
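+// A sketch of pulling a single file out of a layer (br is assumed to be a
+// BReader over a tar layer, and the file name is illustrative):
+//
+//	tr, _ := br.ToTarReader()
+//	hdr, rdr, err := tr.ReadFile("etc/os-release")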
+func NewTarReader(opts ...Opts) *BTarReader { bc := blobConfig{} for _, opt := range opts { opt(&bc) } - c := common{ - desc: bc.desc, - r: bc.r, - rawHeader: bc.header, - resp: bc.resp, - } - tr := tarReader{ - common: c, + tr := BTarReader{ + BCommon: BCommon{ + desc: bc.desc, + r: bc.r, + rawHeader: bc.header, + resp: bc.resp, + }, origRdr: bc.rdr, } if bc.rdr != nil { tr.blobSet = true tr.digester = digest.Canonical.Digester() - tr.reader = io.TeeReader(bc.rdr, tr.digester.Hash()) + rdr := bc.rdr + if tr.desc.Size > 0 { + rdr = &limitread.LimitRead{ + Reader: rdr, + Limit: tr.desc.Size, + } + } + tr.reader = io.TeeReader(rdr, tr.digester.Hash()) } return &tr } -// Close attempts to close the reader and populates/validates the digest -func (tr *tarReader) Close() error { - var err error - if tr.digester != nil { - dig := tr.digester.Digest() - tr.digester = nil - if tr.desc.Digest.String() != "" && dig != tr.desc.Digest { - err = fmt.Errorf("digest mismatch, expected %s, received %s", tr.desc.Digest.String(), dig.String()) - } - tr.desc.Digest = dig - } - if tr.origRdr == nil { - return err - } +// Close attempts to close the reader and populates/validates the digest. +func (tr *BTarReader) Close() error { // attempt to close if available in original reader - if trc, ok := tr.origRdr.(io.Closer); ok { + if trc, ok := tr.origRdr.(io.Closer); ok && trc != nil { return trc.Close() } - return err + return nil } -// GetTarReader returns the tar.Reader for the blob -func (tr *tarReader) GetTarReader() (*tar.Reader, error) { +// GetTarReader returns the tar.Reader for the blob. +func (tr *BTarReader) GetTarReader() (*tar.Reader, error) { if tr.reader == nil { return nil, fmt.Errorf("blob has no reader defined") } @@ -89,8 +82,8 @@ func (tr *tarReader) GetTarReader() (*tar.Reader, error) { return tr.tr, nil } -// RawBody returns the original body from the request -func (tr *tarReader) RawBody() ([]byte, error) { +// RawBody returns the original body from the request. +func (tr *BTarReader) RawBody() ([]byte, error) { if !tr.blobSet { return []byte{}, fmt.Errorf("Blob is not defined") } @@ -101,12 +94,20 @@ func (tr *tarReader) RawBody() ([]byte, error) { if err != nil { return b, err } + if tr.digester != nil { + dig := tr.digester.Digest() + tr.digester = nil + if tr.desc.Digest.String() != "" && dig != tr.desc.Digest { + return b, fmt.Errorf("%w, expected %s, received %s", errs.ErrDigestMismatch, tr.desc.Digest.String(), dig.String()) + } + tr.desc.Digest = dig + } err = tr.Close() return b, err } -// ReadFile parses the tar to find a file -func (tr *tarReader) ReadFile(filename string) (*tar.Header, io.Reader, error) { +// ReadFile parses the tar to find a file. +func (tr *BTarReader) ReadFile(filename string) (*tar.Header, io.Reader, error) { if strings.HasPrefix(filename, ".wh.") { return nil, nil, fmt.Errorf(".wh. 
prefix is reserved for whiteout files") } @@ -147,10 +148,20 @@ func (tr *tarReader) ReadFile(filename string) (*tar.Header, io.Reader, error) { whiteout = true } } + // EOF encountered if whiteout { - return nil, nil, types.ErrFileDeleted + return nil, nil, errs.ErrFileDeleted + } + if tr.digester != nil { + _, _ = io.Copy(io.Discard, tr.reader) // process/digest any trailing bytes from reader + dig := tr.digester.Digest() + tr.digester = nil + if tr.desc.Digest.String() != "" && dig != tr.desc.Digest { + return nil, nil, fmt.Errorf("%w, expected %s, received %s", errs.ErrDigestMismatch, tr.desc.Digest.String(), dig.String()) + } + tr.desc.Digest = dig } - return nil, nil, types.ErrFileNotFound + return nil, nil, errs.ErrFileNotFound } func tarCmpWhiteout(whFile, tgtFile string) bool { diff --git a/vendor/github.com/regclient/regclient/types/callback.go b/vendor/github.com/regclient/regclient/types/callback.go new file mode 100644 index 000000000..c4c2fad9b --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/callback.go @@ -0,0 +1,29 @@ +package types + +type CallbackState int + +const ( + CallbackUndef CallbackState = iota + CallbackSkipped + CallbackStarted + CallbackActive + CallbackFinished + CallbackArchived +) + +type CallbackKind int + +const ( + CallbackManifest CallbackKind = iota + CallbackBlob +) + +func (k CallbackKind) String() string { + switch k { + case CallbackBlob: + return "blob" + case CallbackManifest: + return "manifest" + } + return "unknown" +} diff --git a/vendor/github.com/regclient/regclient/types/descriptor.go b/vendor/github.com/regclient/regclient/types/descriptor.go index 1f3807bbe..ad31e7e43 100644 --- a/vendor/github.com/regclient/regclient/types/descriptor.go +++ b/vendor/github.com/regclient/regclient/types/descriptor.go @@ -1,184 +1,27 @@ package types -import ( - "fmt" - "strings" - "text/tabwriter" - - // crypto libraries included for go-digest - _ "crypto/sha256" - _ "crypto/sha512" - - "github.com/opencontainers/go-digest" - "github.com/regclient/regclient/internal/units" - "github.com/regclient/regclient/types/platform" +import "github.com/regclient/regclient/types/descriptor" + +type ( + // Descriptor is used in manifests to refer to content by media type, size, and digest. + // + // Deprecated: replace with [descriptor.Descriptor]. + Descriptor = descriptor.Descriptor + // MatchOpt defines conditions for a match descriptor. + // + // Deprecated: replace with [descriptor.MatchOpt]. + MatchOpt = descriptor.MatchOpt ) -// Descriptor is used in manifests to refer to content by media type, size, and digest. -type Descriptor struct { - // MediaType describe the type of the content. - MediaType string `json:"mediaType,omitempty"` - - // Size in bytes of content. - Size int64 `json:"size,omitempty"` - - // Digest uniquely identifies the content. - Digest digest.Digest `json:"digest,omitempty"` - - // URLs contains the source URLs of this content. - URLs []string `json:"urls,omitempty"` - - // Annotations contains arbitrary metadata relating to the targeted content. - Annotations map[string]string `json:"annotations,omitempty"` - - // Data is an embedding of the targeted content. This is encoded as a base64 - // string when marshalled to JSON (automatically, by encoding/json). If - // present, Data can be used directly to avoid fetching the targeted content. - Data []byte `json:"data,omitempty"` - - // Platform describes the platform which the image in the manifest runs on. - // This should only be used when referring to a manifest. 
- Platform *platform.Platform `json:"platform,omitempty"` - - // ArtifactType is the media type of the artifact this descriptor refers to. - ArtifactType string `json:"artifactType,omitempty"` -} - -var ScratchData = []byte("{}") -var ScratchDigest = digest.FromBytes(ScratchData) -var emptyDigest = digest.FromBytes([]byte{}) -var mtToOCI map[string]string - -func init() { - mtToOCI = map[string]string{ - MediaTypeDocker2ManifestList: MediaTypeOCI1ManifestList, - MediaTypeDocker2Manifest: MediaTypeOCI1Manifest, - MediaTypeDocker2ImageConfig: MediaTypeOCI1ImageConfig, - MediaTypeDocker2LayerGzip: MediaTypeOCI1LayerGzip, - MediaTypeOCI1ManifestList: MediaTypeOCI1ManifestList, - MediaTypeOCI1Manifest: MediaTypeOCI1Manifest, - MediaTypeOCI1ImageConfig: MediaTypeOCI1ImageConfig, - MediaTypeOCI1LayerGzip: MediaTypeOCI1LayerGzip, - } -} - -// GetData decodes the Data field from the descriptor if available -func (d Descriptor) GetData() ([]byte, error) { - if len(d.Data) == 0 && d.Digest != emptyDigest { - return nil, ErrParsingFailed - } - // verify length - if int64(len(d.Data)) != d.Size { - return nil, ErrParsingFailed - } - // generate and verify digest - dDig := digest.FromBytes(d.Data) - if d.Digest != dDig { - return nil, ErrParsingFailed - } - // return data - return d.Data, nil -} - -// Equal indicates the two descriptors are identical, effectively a DeepEqual. -func (d Descriptor) Equal(d2 Descriptor) bool { - if !d.Same(d2) { - return false - } - if d.MediaType != d2.MediaType { - return false - } - if d.ArtifactType != d2.ArtifactType { - return false - } - if d.Platform == nil || d2.Platform == nil { - if d.Platform != nil || d2.Platform != nil { - return false - } - } else if !platform.Match(*d.Platform, *d2.Platform) { - return false - } - if d.URLs == nil || d2.URLs == nil { - if d.URLs != nil || d2.URLs != nil { - return false - } - } else if len(d.URLs) != len(d2.URLs) { - return false - } else { - for i := range d.URLs { - if d.URLs[i] != d2.URLs[i] { - return false - } - } - } - if d.Annotations == nil || d2.Annotations == nil { - if d.Annotations != nil || d2.Annotations != nil { - return false - } - } else if len(d.Annotations) != len(d2.Annotations) { - return false - } else { - for i := range d.Annotations { - if d.Annotations[i] != d2.Annotations[i] { - return false - } - } - } - return true -} - -// Same indicates two descriptors point to the same CAS object. 
-// This verifies the digest, media type, and size all match -func (d Descriptor) Same(d2 Descriptor) bool { - if d.Digest != d2.Digest || d.Size != d2.Size { - return false - } - // loosen the check on media type since this can be converted from a build - if d.MediaType != d2.MediaType { - if _, ok := mtToOCI[d.MediaType]; !ok { - return false - } else if mtToOCI[d.MediaType] != mtToOCI[d2.MediaType] { - return false - } - } - return true -} - -func (d Descriptor) MarshalPrettyTW(tw *tabwriter.Writer, prefix string) error { - fmt.Fprintf(tw, "%sDigest:\t%s\n", prefix, string(d.Digest)) - fmt.Fprintf(tw, "%sMediaType:\t%s\n", prefix, d.MediaType) - if d.ArtifactType != "" { - fmt.Fprintf(tw, "%sArtifactType:\t%s\n", prefix, d.ArtifactType) - } - switch d.MediaType { - case MediaTypeDocker1Manifest, MediaTypeDocker1ManifestSigned, - MediaTypeDocker2Manifest, MediaTypeDocker2ManifestList, - MediaTypeOCI1Manifest, MediaTypeOCI1ManifestList: - // skip printing size for descriptors to manifests - default: - if d.Size > 100000 { - fmt.Fprintf(tw, "%sSize:\t%s\n", prefix, units.HumanSize(float64(d.Size))) - } else { - fmt.Fprintf(tw, "%sSize:\t%dB\n", prefix, d.Size) - } - } - if p := d.Platform; p != nil && p.OS != "" { - fmt.Fprintf(tw, "%sPlatform:\t%s\n", prefix, p.String()) - if p.OSVersion != "" { - fmt.Fprintf(tw, "%sOSVersion:\t%s\n", prefix, p.OSVersion) - } - if len(p.OSFeatures) > 0 { - fmt.Fprintf(tw, "%sOSFeatures:\t%s\n", prefix, strings.Join(p.OSFeatures, ", ")) - } - } - if len(d.URLs) > 0 { - fmt.Fprintf(tw, "%sURLs:\t%s\n", prefix, strings.Join(d.URLs, ", ")) - } - if d.Annotations != nil { - fmt.Fprintf(tw, "%sAnnotations:\t\n", prefix) - for k, v := range d.Annotations { - fmt.Fprintf(tw, "%s %s:\t%s\n", prefix, k, v) - } - } - return nil -} +var ( + // EmptyData is the content of the empty JSON descriptor. See [mediatype.OCI1Empty]. + // + // Deprecated: replace with [descriptor.EmptyData]. + EmptyData = descriptor.EmptyData + // EmptyDigest is the digest of the empty JSON descriptor. See [mediatype.OCI1Empty]. + // + // Deprecated: replace with [descriptor.EmptyDigest]. + EmptyDigest = descriptor.EmptyDigest + DescriptorListFilter = descriptor.DescriptorListFilter + DescriptorListSearch = descriptor.DescriptorListSearch +) diff --git a/vendor/github.com/regclient/regclient/types/descriptor/descriptor.go b/vendor/github.com/regclient/regclient/types/descriptor/descriptor.go new file mode 100644 index 000000000..7afabd22b --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/descriptor/descriptor.go @@ -0,0 +1,293 @@ +// Package descriptor defines the OCI descriptor data structure used in manifests to reference content addressable data. +package descriptor + +import ( + "fmt" + "sort" + "strings" + "text/tabwriter" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/internal/units" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" + "github.com/regclient/regclient/types/platform" +) + +// Descriptor is used in manifests to refer to content by media type, size, and digest. +type Descriptor struct { + // MediaType describe the type of the content. + MediaType string `json:"mediaType"` + + // Digest uniquely identifies the content. + Digest digest.Digest `json:"digest"` + + // Size in bytes of content. + Size int64 `json:"size"` + + // URLs contains the source URLs of this content. 
+ URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Data is an embedding of the targeted content. This is encoded as a base64 + // string when marshalled to JSON (automatically, by encoding/json). If + // present, Data can be used directly to avoid fetching the targeted content. + Data []byte `json:"data,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // This should only be used when referring to a manifest. + Platform *platform.Platform `json:"platform,omitempty"` + + // ArtifactType is the media type of the artifact this descriptor refers to. + ArtifactType string `json:"artifactType,omitempty"` +} + +var ( + // EmptyData is the content of the empty JSON descriptor. See [mediatype.OCI1Empty]. + EmptyData = []byte("{}") + // EmptyDigest is the digest of the empty JSON descriptor. See [mediatype.OCI1Empty]. + EmptyDigest = digest.FromBytes(EmptyData) + emptyDigest = digest.FromBytes([]byte{}) + mtToOCI map[string]string +) + +func init() { + mtToOCI = map[string]string{ + mediatype.Docker2ManifestList: mediatype.OCI1ManifestList, + mediatype.Docker2Manifest: mediatype.OCI1Manifest, + mediatype.Docker2ImageConfig: mediatype.OCI1ImageConfig, + mediatype.Docker2LayerGzip: mediatype.OCI1LayerGzip, + mediatype.OCI1ManifestList: mediatype.OCI1ManifestList, + mediatype.OCI1Manifest: mediatype.OCI1Manifest, + mediatype.OCI1ImageConfig: mediatype.OCI1ImageConfig, + mediatype.OCI1LayerGzip: mediatype.OCI1LayerGzip, + } +} + +// GetData decodes the Data field from the descriptor if available +func (d Descriptor) GetData() ([]byte, error) { + if len(d.Data) == 0 && d.Digest != emptyDigest { + return nil, errs.ErrParsingFailed + } + // verify length + if int64(len(d.Data)) != d.Size { + return nil, errs.ErrParsingFailed + } + // generate and verify digest + dDig := digest.FromBytes(d.Data) + if d.Digest != dDig { + return nil, errs.ErrParsingFailed + } + // return data + return d.Data, nil +} + +// Equal indicates the two descriptors are identical, effectively a DeepEqual. +func (d Descriptor) Equal(d2 Descriptor) bool { + if !d.Same(d2) { + return false + } + if d.MediaType != d2.MediaType { + return false + } + if d.ArtifactType != d2.ArtifactType { + return false + } + if d.Platform == nil || d2.Platform == nil { + if d.Platform != nil || d2.Platform != nil { + return false + } + } else if !platform.Match(*d.Platform, *d2.Platform) { + return false + } + if d.URLs == nil || d2.URLs == nil { + if d.URLs != nil || d2.URLs != nil { + return false + } + } else if len(d.URLs) != len(d2.URLs) { + return false + } else { + for i := range d.URLs { + if d.URLs[i] != d2.URLs[i] { + return false + } + } + } + if d.Annotations == nil || d2.Annotations == nil { + if d.Annotations != nil || d2.Annotations != nil { + return false + } + } else if len(d.Annotations) != len(d2.Annotations) { + return false + } else { + for i := range d.Annotations { + if d.Annotations[i] != d2.Annotations[i] { + return false + } + } + } + return true +} + +// Same indicates two descriptors point to the same CAS object. +// This verifies the digest, media type, and size all match. 
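+// Same is intentionally looser than Equal; a Docker schema2 descriptor and
+// its OCI counterpart compare as the same object when digest and size match:
+//
+//	d2 := d                               // d uses mediatype.Docker2Manifest
+//	d2.MediaType = mediatype.OCI1Manifest
+//	same := d.Same(d2)  // true
+//	eq := d.Equal(d2)   // false, the media type differs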
+func (d Descriptor) Same(d2 Descriptor) bool { + if d.Digest != d2.Digest || d.Size != d2.Size { + return false + } + // loosen the check on media type since this can be converted from a build + if d.MediaType != d2.MediaType { + if _, ok := mtToOCI[d.MediaType]; !ok { + return false + } else if mtToOCI[d.MediaType] != mtToOCI[d2.MediaType] { + return false + } + } + return true +} + +func (d Descriptor) MarshalPrettyTW(tw *tabwriter.Writer, prefix string) error { + fmt.Fprintf(tw, "%sDigest:\t%s\n", prefix, string(d.Digest)) + fmt.Fprintf(tw, "%sMediaType:\t%s\n", prefix, d.MediaType) + if d.ArtifactType != "" { + fmt.Fprintf(tw, "%sArtifactType:\t%s\n", prefix, d.ArtifactType) + } + switch d.MediaType { + case mediatype.Docker1Manifest, mediatype.Docker1ManifestSigned, + mediatype.Docker2Manifest, mediatype.Docker2ManifestList, + mediatype.OCI1Manifest, mediatype.OCI1ManifestList: + // skip printing size for descriptors to manifests + default: + if d.Size > 100000 { + fmt.Fprintf(tw, "%sSize:\t%s\n", prefix, units.HumanSize(float64(d.Size))) + } else { + fmt.Fprintf(tw, "%sSize:\t%dB\n", prefix, d.Size) + } + } + if p := d.Platform; p != nil && p.OS != "" { + fmt.Fprintf(tw, "%sPlatform:\t%s\n", prefix, p.String()) + if p.OSVersion != "" { + fmt.Fprintf(tw, "%sOSVersion:\t%s\n", prefix, p.OSVersion) + } + if len(p.OSFeatures) > 0 { + fmt.Fprintf(tw, "%sOSFeatures:\t%s\n", prefix, strings.Join(p.OSFeatures, ", ")) + } + } + if len(d.URLs) > 0 { + fmt.Fprintf(tw, "%sURLs:\t%s\n", prefix, strings.Join(d.URLs, ", ")) + } + if d.Annotations != nil { + fmt.Fprintf(tw, "%sAnnotations:\t\n", prefix) + for k, v := range d.Annotations { + fmt.Fprintf(tw, "%s %s:\t%s\n", prefix, k, v) + } + } + return nil +} + +// MatchOpt defines conditions for a match descriptor. +type MatchOpt struct { + Platform *platform.Platform // Platform to match including compatible platforms (darwin/arm64 matches linux/arm64) + ArtifactType string // Match ArtifactType in the descriptor + Annotations map[string]string // Match each of the specified annotations and their value, an empty value verifies the key is set + SortAnnotation string // Sort the results by an annotation, string based comparison, descriptors without the annotation are sorted last + SortDesc bool // Set to true to sort in descending order +} + +// Match returns true if the descriptor matches the options, including compatible platforms. +func (d Descriptor) Match(opt MatchOpt) bool { + if opt.ArtifactType != "" && d.ArtifactType != opt.ArtifactType { + return false + } + if opt.Annotations != nil && len(opt.Annotations) > 0 { + if d.Annotations == nil { + return false + } + for k, v := range opt.Annotations { + if dv, ok := d.Annotations[k]; !ok || (v != "" && v != dv) { + return false + } + } + } + if opt.Platform != nil { + if d.Platform == nil { + return false + } + if !platform.Compatible(*opt.Platform, *d.Platform) { + return false + } + } + return true +} + +// DescriptorListFilter returns a list of descriptors from the list matching the search options. +// When opt.SortAnnotation is set, the order of descriptors with matching annotations is undefined. 
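+// A sketch of selecting linux/amd64 entries from an index (manifests is
+// assumed to be the descriptor list of a manifest list, and platform.Parse
+// is assumed available from the imported platform package):
+//
+//	p, _ := platform.Parse("linux/amd64")
+//	matched := DescriptorListFilter(manifests, MatchOpt{Platform: &p})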
+func DescriptorListFilter(dl []Descriptor, opt MatchOpt) []Descriptor { + ret := []Descriptor{} + for _, d := range dl { + if d.Match(opt) { + ret = append(ret, d) + } + } + if opt.SortAnnotation != "" { + sort.Slice(ret, func(i, j int) bool { + // if annotations are not defined, sort to the very end + if ret[i].Annotations == nil { + return false + } + if _, ok := ret[i].Annotations[opt.SortAnnotation]; !ok { + return false + } + if ret[j].Annotations == nil { + return true + } + if _, ok := ret[j].Annotations[opt.SortAnnotation]; !ok { + return true + } + // else sort by string + if strings.Compare(ret[i].Annotations[opt.SortAnnotation], ret[j].Annotations[opt.SortAnnotation]) < 0 { + return !opt.SortDesc + } + return opt.SortDesc + }) + } + return ret +} + +// DescriptorListSearch returns the first descriptor from the list matching the search options. +func DescriptorListSearch(dl []Descriptor, opt MatchOpt) (Descriptor, error) { + if opt.ArtifactType != "" || opt.SortAnnotation != "" || len(opt.Annotations) > 0 { + dl = DescriptorListFilter(dl, opt) + } + var ret Descriptor + var retPlat platform.Platform + if len(dl) == 0 { + return ret, errs.ErrNotFound + } + if opt.Platform == nil { + return dl[0], nil + } + found := false + comp := platform.NewCompare(*opt.Platform) + for _, d := range dl { + if d.Platform == nil { + continue + } + if comp.Better(*d.Platform, retPlat) { + found = true + ret = d + retPlat = *d.Platform + } + } + if !found { + return ret, errs.ErrNotFound + } + return ret, nil +} diff --git a/vendor/github.com/regclient/regclient/types/docker/schema1/manifest.go b/vendor/github.com/regclient/regclient/types/docker/schema1/manifest.go index bdac5949b..923797305 100644 --- a/vendor/github.com/regclient/regclient/types/docker/schema1/manifest.go +++ b/vendor/github.com/regclient/regclient/types/docker/schema1/manifest.go @@ -9,20 +9,21 @@ import ( "github.com/docker/libtrust" "github.com/opencontainers/go-digest" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/docker" + "github.com/regclient/regclient/types/mediatype" ) var ( // ManifestSchemaVersion provides a pre-initialized version structure schema1 manifests. ManifestSchemaVersion = docker.Versioned{ SchemaVersion: 1, - MediaType: types.MediaTypeDocker1Manifest, + MediaType: mediatype.Docker1Manifest, } // ManifestSignedSchemaVersion provides a pre-initialized version structure schema1 signed manifests. ManifestSignedSchemaVersion = docker.Versioned{ SchemaVersion: 1, - MediaType: types.MediaTypeDocker1ManifestSigned, + MediaType: mediatype.Docker1ManifestSigned, } ) @@ -118,7 +119,7 @@ func (sm *SignedManifest) MarshalJSON() ([]byte, error) { // Payload returns the signed content of the signed manifest. func (sm SignedManifest) Payload() (string, []byte, error) { - return types.MediaTypeDocker1ManifestSigned, sm.all, nil + return mediatype.Docker1ManifestSigned, sm.all, nil } // Signatures returns the signatures as provided by (*libtrust.JSONSignature).Signatures. 
diff --git a/vendor/github.com/regclient/regclient/types/docker/schema2/manifest.go b/vendor/github.com/regclient/regclient/types/docker/schema2/manifest.go index 659640b13..16bb8cb51 100644 --- a/vendor/github.com/regclient/regclient/types/docker/schema2/manifest.go +++ b/vendor/github.com/regclient/regclient/types/docker/schema2/manifest.go @@ -1,14 +1,15 @@ package schema2 import ( - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/descriptor" "github.com/regclient/regclient/types/docker" + "github.com/regclient/regclient/types/mediatype" ) // ManifestSchemaVersion is a pre-configured versioned field for manifests var ManifestSchemaVersion = docker.Versioned{ SchemaVersion: 2, - MediaType: types.MediaTypeDocker2Manifest, + MediaType: mediatype.Docker2Manifest, } // Manifest defines a schema2 manifest. @@ -16,11 +17,11 @@ type Manifest struct { docker.Versioned // Config references the image configuration as a blob. - Config types.Descriptor `json:"config"` + Config descriptor.Descriptor `json:"config"` // Layers lists descriptors for the layers referenced by the // configuration. - Layers []types.Descriptor `json:"layers"` + Layers []descriptor.Descriptor `json:"layers"` // Annotations contains arbitrary metadata for the image index. // Note, this is not a defined docker schema2 field. diff --git a/vendor/github.com/regclient/regclient/types/docker/schema2/manifestlist.go b/vendor/github.com/regclient/regclient/types/docker/schema2/manifestlist.go index 613db51a4..8fa804712 100644 --- a/vendor/github.com/regclient/regclient/types/docker/schema2/manifestlist.go +++ b/vendor/github.com/regclient/regclient/types/docker/schema2/manifestlist.go @@ -1,14 +1,15 @@ package schema2 import ( - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/descriptor" "github.com/regclient/regclient/types/docker" + "github.com/regclient/regclient/types/mediatype" ) // ManifestListSchemaVersion is a pre-configured versioned field for manifest lists var ManifestListSchemaVersion = docker.Versioned{ SchemaVersion: 2, - MediaType: types.MediaTypeDocker2ManifestList, + MediaType: mediatype.Docker2ManifestList, } // ManifestList references manifests for various platforms. @@ -16,7 +17,7 @@ type ManifestList struct { docker.Versioned // Manifests lists descriptors in the manifest list - Manifests []types.Descriptor `json:"manifests"` + Manifests []descriptor.Descriptor `json:"manifests"` // Annotations contains arbitrary metadata for the image index. // Note, this is not a defined docker schema2 field. diff --git a/vendor/github.com/regclient/regclient/types/error.go b/vendor/github.com/regclient/regclient/types/error.go index b56a7e050..8841d0153 100644 --- a/vendor/github.com/regclient/regclient/types/error.go +++ b/vendor/github.com/regclient/regclient/types/error.go @@ -1,76 +1,150 @@ package types -import ( - "errors" - "fmt" - "io/fs" -) +import "github.com/regclient/regclient/types/errs" var ( // ErrAllRequestsFailed when there are no mirrors left to try - ErrAllRequestsFailed = errors.New("all requests failed") + // + // Deprecated: replace with [errs.ErrAllRequestsFailed]. + ErrAllRequestsFailed = errs.ErrAllRequestsFailed // ErrAPINotFound if an api is not available for the host - ErrAPINotFound = errors.New("API not found") + // + // Deprecated: replace with [errs.ErrAPINotFound]. 
+	ErrAPINotFound = errs.ErrAPINotFound
 	// ErrBackoffLimit maximum backoff attempts reached
-	ErrBackoffLimit = errors.New("backoff limit reached")
+	//
+	// Deprecated: replace with [errs.ErrBackoffLimit].
+	ErrBackoffLimit = errs.ErrBackoffLimit
 	// ErrCanceled if the context was canceled
-	ErrCanceled = errors.New("context was canceled")
+	//
+	// Deprecated: replace with [errs.ErrCanceled].
+	ErrCanceled = errs.ErrCanceled
 	// ErrDigestMismatch if the expected digest wasn't received
-	ErrDigestMismatch = errors.New("digest mismatch")
+	//
+	// Deprecated: replace with [errs.ErrDigestMismatch].
+	ErrDigestMismatch = errs.ErrDigestMismatch
 	// ErrEmptyChallenge indicates an issue with the received challenge in the WWW-Authenticate header
-	ErrEmptyChallenge = errors.New("empty challenge header")
+	//
+	// Deprecated: replace with [errs.ErrEmptyChallenge].
+	ErrEmptyChallenge = errs.ErrEmptyChallenge
 	// ErrFileDeleted indicates a requested file has been deleted
-	ErrFileDeleted = errors.New("file deleted")
+	//
+	// Deprecated: replace with [errs.ErrFileDeleted].
+	ErrFileDeleted = errs.ErrFileDeleted
 	// ErrFileNotFound indicates a requested file is not found
-	ErrFileNotFound = fmt.Errorf("file not found%.0w", fs.ErrNotExist)
+	//
+	// Deprecated: replace with [errs.ErrFileNotFound].
+	ErrFileNotFound = errs.ErrFileNotFound
 	// ErrHTTPStatus if the http status code was unexpected
-	ErrHTTPStatus = errors.New("unexpected http status code")
+	//
+	// Deprecated: replace with [errs.ErrHTTPStatus].
+	ErrHTTPStatus = errs.ErrHTTPStatus
 	// ErrInvalidChallenge indicates an issue with the received challenge in the WWW-Authenticate header
-	ErrInvalidChallenge = errors.New("invalid challenge header")
+	//
+	// Deprecated: replace with [errs.ErrInvalidChallenge].
+	ErrInvalidChallenge = errs.ErrInvalidChallenge
+	// ErrInvalidReference indicates the reference to an image has an invalid syntax
+	//
+	// Deprecated: replace with [errs.ErrInvalidReference].
+	ErrInvalidReference = errs.ErrInvalidReference
+	// ErrLoopDetected indicates a child node points back to the parent
+	//
+	// Deprecated: replace with [errs.ErrLoopDetected].
+	ErrLoopDetected = errs.ErrLoopDetected
 	// ErrManifestNotSet indicates the manifest is not set, it must be pulled with a ManifestGet first
-	ErrManifestNotSet = errors.New("manifest not set")
+	//
+	// Deprecated: replace with [errs.ErrManifestNotSet].
+	ErrManifestNotSet = errs.ErrManifestNotSet
 	// ErrMissingAnnotation returned when a needed annotation is not found
-	ErrMissingAnnotation = errors.New("annotation is missing")
+	//
+	// Deprecated: replace with [errs.ErrMissingAnnotation].
+	ErrMissingAnnotation = errs.ErrMissingAnnotation
 	// ErrMissingDigest returned when image reference does not include a digest
-	ErrMissingDigest = errors.New("digest missing from image reference")
+	//
+	// Deprecated: replace with [errs.ErrMissingDigest].
+	ErrMissingDigest = errs.ErrMissingDigest
 	// ErrMissingLocation returned when the location header is missing
-	ErrMissingLocation = errors.New("location header missing")
+	//
+	// Deprecated: replace with [errs.ErrMissingLocation].
+	ErrMissingLocation = errs.ErrMissingLocation
 	// ErrMissingName returned when name missing for host
-	ErrMissingName = errors.New("name missing")
+	//
+	// Deprecated: replace with [errs.ErrMissingName].
+	ErrMissingName = errs.ErrMissingName
 	// ErrMissingTag returned when image reference does not include a tag
-	ErrMissingTag = errors.New("tag missing from image reference")
+	//
+	// Deprecated: replace with [errs.ErrMissingTag].
+	ErrMissingTag = errs.ErrMissingTag
 	// ErrMissingTagOrDigest returned when image reference does not include a tag or digest
-	ErrMissingTagOrDigest = errors.New("tag or Digest missing from image reference")
+	//
+	// Deprecated: replace with [errs.ErrMissingTagOrDigest].
+	ErrMissingTagOrDigest = errs.ErrMissingTagOrDigest
 	// ErrMismatch returned when a comparison detects a difference
-	ErrMismatch = errors.New("content does not match")
+	//
+	// Deprecated: replace with [errs.ErrMismatch].
+	ErrMismatch = errs.ErrMismatch
 	// ErrMountReturnedLocation when a blob mount fails but a location header is received
-	ErrMountReturnedLocation = errors.New("blob mount returned a location to upload")
+	//
+	// Deprecated: replace with [errs.ErrMountReturnedLocation].
+	ErrMountReturnedLocation = errs.ErrMountReturnedLocation
 	// ErrNoNewChallenge indicates a challenge update did not result in any change
-	ErrNoNewChallenge = errors.New("no new challenge")
+	//
+	// Deprecated: replace with [errs.ErrNoNewChallenge].
+	ErrNoNewChallenge = errs.ErrNoNewChallenge
 	// ErrNotFound isn't there, search for your value elsewhere
-	ErrNotFound = errors.New("not found")
+	//
+	// Deprecated: replace with [errs.ErrNotFound].
+	ErrNotFound = errs.ErrNotFound
 	// ErrNotImplemented returned when method has not been implemented yet
-	ErrNotImplemented = errors.New("not implemented")
+	//
+	// Deprecated: replace with [errs.ErrNotImplemented].
+	ErrNotImplemented = errs.ErrNotImplemented
+	// ErrNotRetryable indicates the process cannot be retried
+	//
+	// Deprecated: replace with [errs.ErrNotRetryable].
+	ErrNotRetryable = errs.ErrNotRetryable
 	// ErrParsingFailed when a string cannot be parsed
-	ErrParsingFailed = errors.New("parsing failed")
+	//
+	// Deprecated: replace with [errs.ErrParsingFailed].
+	ErrParsingFailed = errs.ErrParsingFailed
 	// ErrRetryNeeded indicates a request needs to be retried
-	ErrRetryNeeded = errors.New("retry needed")
+	//
+	// Deprecated: replace with [errs.ErrRetryNeeded].
+	ErrRetryNeeded = errs.ErrRetryNeeded
+	// ErrShortRead if contents are less than the expected size
+	//
+	// Deprecated: replace with [errs.ErrShortRead].
+	ErrShortRead = errs.ErrShortRead
+	// ErrSizeLimitExceeded if contents exceed the size limit
+	//
+	// Deprecated: replace with [errs.ErrSizeLimitExceeded].
+	ErrSizeLimitExceeded = errs.ErrSizeLimitExceeded
 	// ErrUnavailable when a requested value is not available
-	ErrUnavailable = errors.New("unavailable")
+	//
+	// Deprecated: replace with [errs.ErrUnavailable].
+	ErrUnavailable = errs.ErrUnavailable
 	// ErrUnsupported indicates the request was unsupported
-	ErrUnsupported = errors.New("unsupported")
+	//
+	// Deprecated: replace with [errs.ErrUnsupported].
+	ErrUnsupported = errs.ErrUnsupported
 	// ErrUnsupportedAPI happens when an API is not supported on a registry
-	ErrUnsupportedAPI = errors.New("unsupported API")
+	//
+	// Deprecated: replace with [errs.ErrUnsupportedAPI].
+	ErrUnsupportedAPI = errs.ErrUnsupportedAPI
 	// ErrUnsupportedConfigVersion happens when config file version is greater than this command supports
-	ErrUnsupportedConfigVersion = errors.New("unsupported config version")
+	//
+	// Deprecated: replace with [errs.ErrUnsupportedConfigVersion].
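+	//
+	// Since these aliases share the underlying error values with the errs
+	// package, errors.Is matches across both paths during migration; a sketch:
+	//
+	//	errors.Is(err, types.ErrUnsupportedConfigVersion) // equivalent to
+	//	errors.Is(err, errs.ErrUnsupportedConfigVersion)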
+ ErrUnsupportedConfigVersion = errs.ErrUnsupportedConfigVersion // ErrUnsupportedMediaType returned when media type is unknown or unsupported - ErrUnsupportedMediaType = errors.New("unsupported media type") -) - -// custom HTTP errors extend the ErrHTTPStatus error -var ( + // + // Deprecated: replace with [errs.ErrUnsupportedMediaType]. + ErrUnsupportedMediaType = errs.ErrUnsupportedMediaType // ErrHTTPRateLimit when requests exceed server rate limit - ErrHTTPRateLimit = fmt.Errorf("rate limit exceeded%.0w", ErrHTTPStatus) + // + // Deprecated: replace with [errs.ErrHTTPRateLimit]. + ErrHTTPRateLimit = errs.ErrHTTPRateLimit // ErrHTTPUnauthorized when authentication fails - ErrHTTPUnauthorized = fmt.Errorf("unauthorized%.0w", ErrHTTPStatus) + // + // Deprecated: replace with [errs.ErrHTTPUnauthorized]. + ErrHTTPUnauthorized = errs.ErrHTTPUnauthorized ) diff --git a/vendor/github.com/regclient/regclient/types/errs/error.go b/vendor/github.com/regclient/regclient/types/errs/error.go new file mode 100644 index 000000000..fc3e1dd10 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/errs/error.go @@ -0,0 +1,87 @@ +// Package errs is used for predefined error values. +package errs + +import ( + "errors" + "fmt" + "io/fs" +) + +var ( + // ErrAllRequestsFailed when there are no mirrors left to try + ErrAllRequestsFailed = errors.New("all requests failed") + // ErrAPINotFound if an api is not available for the host + ErrAPINotFound = errors.New("API not found") + // ErrBackoffLimit maximum backoff attempts reached + ErrBackoffLimit = errors.New("backoff limit reached") + // ErrCanceled if the context was canceled + ErrCanceled = errors.New("context was canceled") + // ErrDigestMismatch if the expected digest wasn't received + ErrDigestMismatch = errors.New("digest mismatch") + // ErrEmptyChallenge indicates an issue with the received challenge in the WWW-Authenticate header + ErrEmptyChallenge = errors.New("empty challenge header") + // ErrFileDeleted indicates a requested file has been deleted + ErrFileDeleted = errors.New("file deleted") + // ErrFileNotFound indicates a requested file is not found + ErrFileNotFound = fmt.Errorf("file not found%.0w", fs.ErrNotExist) + // ErrHTTPStatus if the http status code was unexpected + ErrHTTPStatus = errors.New("unexpected http status code") + // ErrInvalidChallenge indicates an issue with the received challenge in the WWW-Authenticate header + ErrInvalidChallenge = errors.New("invalid challenge header") + // ErrInvalidReference indicates the reference to an image is has an invalid syntax + ErrInvalidReference = errors.New("invalid reference") + // ErrLoopDetected indicates a child node points back to the parent + ErrLoopDetected = errors.New("loop detected") + // ErrManifestNotSet indicates the manifest is not set, it must be pulled with a ManifestGet first + ErrManifestNotSet = errors.New("manifest not set") + // ErrMissingAnnotation returned when a needed annotation is not found + ErrMissingAnnotation = errors.New("annotation is missing") + // ErrMissingDigest returned when image reference does not include a digest + ErrMissingDigest = errors.New("digest missing from image reference") + // ErrMissingLocation returned when the location header is missing + ErrMissingLocation = errors.New("location header missing") + // ErrMissingName returned when name missing for host + ErrMissingName = errors.New("name missing") + // ErrMissingTag returned when image reference does not include a tag + ErrMissingTag = errors.New("tag missing from image 
reference") + // ErrMissingTagOrDigest returned when image reference does not include a tag or digest + ErrMissingTagOrDigest = errors.New("tag or Digest missing from image reference") + // ErrMismatch returned when a comparison detects a difference + ErrMismatch = errors.New("content does not match") + // ErrMountReturnedLocation when a blob mount fails but a location header is received + ErrMountReturnedLocation = errors.New("blob mount returned a location to upload") + // ErrNoNewChallenge indicates a challenge update did not result in any change + ErrNoNewChallenge = errors.New("no new challenge") + // ErrNotFound isn't there, search for your value elsewhere + ErrNotFound = errors.New("not found") + // ErrNotImplemented returned when method has not been implemented yet + ErrNotImplemented = errors.New("not implemented") + // ErrNotRetryable indicates the process cannot be retried + ErrNotRetryable = errors.New("not retryable") + // ErrParsingFailed when a string cannot be parsed + ErrParsingFailed = errors.New("parsing failed") + // ErrRetryNeeded indicates a request needs to be retried + ErrRetryNeeded = errors.New("retry needed") + // ErrShortRead if contents are less than expected the size + ErrShortRead = errors.New("short read") + // ErrSizeLimitExceeded if contents exceed the size limit + ErrSizeLimitExceeded = errors.New("size limit exceeded") + // ErrUnavailable when a requested value is not available + ErrUnavailable = errors.New("unavailable") + // ErrUnsupported indicates the request was unsupported + ErrUnsupported = errors.New("unsupported") + // ErrUnsupportedAPI happens when an API is not supported on a registry + ErrUnsupportedAPI = errors.New("unsupported API") + // ErrUnsupportedConfigVersion happens when config file version is greater than this command supports + ErrUnsupportedConfigVersion = errors.New("unsupported config version") + // ErrUnsupportedMediaType returned when media type is unknown or unsupported + ErrUnsupportedMediaType = errors.New("unsupported media type") +) + +// custom HTTP errors extend the ErrHTTPStatus error +var ( + // ErrHTTPRateLimit when requests exceed server rate limit + ErrHTTPRateLimit = fmt.Errorf("rate limit exceeded%.0w", ErrHTTPStatus) + // ErrHTTPUnauthorized when authentication fails + ErrHTTPUnauthorized = fmt.Errorf("unauthorized%.0w", ErrHTTPStatus) +) diff --git a/vendor/github.com/regclient/regclient/types/manifest/common.go b/vendor/github.com/regclient/regclient/types/manifest/common.go index a0fdf6a79..70139b786 100644 --- a/vendor/github.com/regclient/regclient/types/manifest/common.go +++ b/vendor/github.com/regclient/regclient/types/manifest/common.go @@ -10,13 +10,16 @@ import ( _ "crypto/sha512" digest "github.com/opencontainers/go-digest" + "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" "github.com/regclient/regclient/types/ref" ) type common struct { r ref.Ref - desc types.Descriptor + desc descriptor.Descriptor manifSet bool ratelimit types.RateLimit rawHeader http.Header @@ -29,7 +32,7 @@ func (m *common) GetDigest() digest.Digest { } // GetDescriptor returns the descriptor -func (m *common) GetDescriptor() types.Descriptor { +func (m *common) GetDescriptor() descriptor.Descriptor { return m.desc } @@ -73,7 +76,7 @@ func (m *common) IsSet() bool { // RawBody returns the raw body from the manifest if available. 
func (m *common) RawBody() ([]byte, error) { if len(m.rawBody) == 0 { - return m.rawBody, types.ErrManifestNotSet + return m.rawBody, errs.ErrManifestNotSet } return m.rawBody, nil } diff --git a/vendor/github.com/regclient/regclient/types/manifest/docker1.go b/vendor/github.com/regclient/regclient/types/manifest/docker1.go index a5c50887e..00beae2f0 100644 --- a/vendor/github.com/regclient/regclient/types/manifest/docker1.go +++ b/vendor/github.com/regclient/regclient/types/manifest/docker1.go @@ -11,15 +11,18 @@ import ( _ "crypto/sha512" digest "github.com/opencontainers/go-digest" - "github.com/regclient/regclient/types" + + "github.com/regclient/regclient/types/descriptor" "github.com/regclient/regclient/types/docker/schema1" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" "github.com/regclient/regclient/types/platform" ) const ( - // MediaTypeDocker1Manifest deprecated media type for docker schema1 manifests + // MediaTypeDocker1Manifest deprecated media type for docker schema1 manifests. MediaTypeDocker1Manifest = "application/vnd.docker.distribution.manifest.v1+json" - // MediaTypeDocker1ManifestSigned is a deprecated schema1 manifest with jws signing + // MediaTypeDocker1ManifestSigned is a deprecated schema1 manifest with jws signing. MediaTypeDocker1ManifestSigned = "application/vnd.docker.distribution.manifest.v1+prettyjws" ) @@ -32,47 +35,47 @@ type docker1SignedManifest struct { schema1.SignedManifest } -func (m *docker1Manifest) GetConfig() (types.Descriptor, error) { - return types.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *docker1Manifest) GetConfig() (descriptor.Descriptor, error) { + return descriptor.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } func (m *docker1Manifest) GetConfigDigest() (digest.Digest, error) { - return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) + return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *docker1SignedManifest) GetConfig() (types.Descriptor, error) { - return types.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *docker1SignedManifest) GetConfig() (descriptor.Descriptor, error) { + return descriptor.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } func (m *docker1SignedManifest) GetConfigDigest() (digest.Digest, error) { - return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) + return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *docker1Manifest) GetManifestList() ([]types.Descriptor, error) { - return []types.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *docker1Manifest) GetManifestList() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *docker1SignedManifest) GetManifestList() ([]types.Descriptor, 
error) { - return []types.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *docker1SignedManifest) GetManifestList() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *docker1Manifest) GetLayers() ([]types.Descriptor, error) { +func (m *docker1Manifest) GetLayers() ([]descriptor.Descriptor, error) { if !m.manifSet { - return []types.Descriptor{}, types.ErrManifestNotSet + return []descriptor.Descriptor{}, errs.ErrManifestNotSet } - var dl []types.Descriptor + var dl []descriptor.Descriptor for _, sd := range m.FSLayers { - dl = append(dl, types.Descriptor{ + dl = append(dl, descriptor.Descriptor{ Digest: sd.BlobSum, }) } return dl, nil } -func (m *docker1SignedManifest) GetLayers() ([]types.Descriptor, error) { +func (m *docker1SignedManifest) GetLayers() ([]descriptor.Descriptor, error) { if !m.manifSet { - return []types.Descriptor{}, types.ErrManifestNotSet + return []descriptor.Descriptor{}, errs.ErrManifestNotSet } - var dl []types.Descriptor + var dl []descriptor.Descriptor for _, sd := range m.FSLayers { - dl = append(dl, types.Descriptor{ + dl = append(dl, descriptor.Descriptor{ Digest: sd.BlobSum, }) } @@ -86,23 +89,31 @@ func (m *docker1SignedManifest) GetOrig() interface{} { return m.SignedManifest } -func (m *docker1Manifest) GetPlatformDesc(p *platform.Platform) (*types.Descriptor, error) { - return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *docker1Manifest) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *docker1SignedManifest) GetPlatformDesc(p *platform.Platform) (*types.Descriptor, error) { - return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *docker1SignedManifest) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } func (m *docker1Manifest) GetPlatformList() ([]*platform.Platform, error) { - return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) + return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } func (m *docker1SignedManifest) GetPlatformList() ([]*platform.Platform, error) { - return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) + return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1Manifest) GetSize() (int64, error) { + return 0, fmt.Errorf("GetSize is not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1SignedManifest) GetSize() (int64, error) { + return 0, fmt.Errorf("GetSize is not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } func (m *docker1Manifest) MarshalJSON() ([]byte, error) { if !m.manifSet { - return []byte{}, types.ErrManifestNotSet + 
return []byte{}, errs.ErrManifestNotSet } if len(m.rawBody) > 0 { @@ -114,7 +125,7 @@ func (m *docker1Manifest) MarshalJSON() ([]byte, error) { func (m *docker1SignedManifest) MarshalJSON() ([]byte, error) { if !m.manifSet { - return []byte{}, types.ErrManifestNotSet + return []byte{}, errs.ErrManifestNotSet } return m.SignedManifest.MarshalJSON() @@ -136,8 +147,8 @@ func (m *docker1Manifest) MarshalPretty() ([]byte, error) { for _, d := range m.FSLayers { fmt.Fprintf(tw, " Digest:\t%s\n", string(d.BlobSum)) } - tw.Flush() - return buf.Bytes(), nil + err := tw.Flush() + return buf.Bytes(), err } func (m *docker1SignedManifest) MarshalPretty() ([]byte, error) { if m == nil { @@ -155,34 +166,34 @@ func (m *docker1SignedManifest) MarshalPretty() ([]byte, error) { for _, d := range m.FSLayers { fmt.Fprintf(tw, " Digest:\t%s\n", string(d.BlobSum)) } - tw.Flush() - return buf.Bytes(), nil + err := tw.Flush() + return buf.Bytes(), err } -func (m *docker1Manifest) SetConfig(d types.Descriptor) error { - return fmt.Errorf("set methods not supported for for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *docker1Manifest) SetConfig(d descriptor.Descriptor) error { + return fmt.Errorf("set methods not supported for for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *docker1SignedManifest) SetConfig(d types.Descriptor) error { - return fmt.Errorf("set methods not supported for for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *docker1SignedManifest) SetConfig(d descriptor.Descriptor) error { + return fmt.Errorf("set methods not supported for for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *docker1Manifest) SetLayers(dl []types.Descriptor) error { - return fmt.Errorf("set methods not supported for for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *docker1Manifest) SetLayers(dl []descriptor.Descriptor) error { + return fmt.Errorf("set methods not supported for for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *docker1SignedManifest) SetLayers(dl []types.Descriptor) error { - return fmt.Errorf("set methods not supported for for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *docker1SignedManifest) SetLayers(dl []descriptor.Descriptor) error { + return fmt.Errorf("set methods not supported for for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } func (m *docker1Manifest) SetOrig(origIn interface{}) error { orig, ok := origIn.(schema1.Manifest) if !ok { - return types.ErrUnsupportedMediaType + return errs.ErrUnsupportedMediaType } - if orig.MediaType != types.MediaTypeDocker1Manifest { + if orig.MediaType != mediatype.Docker1Manifest { // TODO: error? 
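
Because the schema1 accessors above wrap errs.ErrUnsupportedMediaType (again via "%.0w"), callers can feature-detect instead of comparing media-type strings. A hedged sketch with a hypothetical helper, not part of this patch:

	package example

	import (
		"errors"

		digest "github.com/opencontainers/go-digest"
		"github.com/regclient/regclient/types/errs"
		"github.com/regclient/regclient/types/manifest"
	)

	// configDigest returns the config digest when the manifest format has one.
	func configDigest(m manifest.Manifest) (digest.Digest, bool) {
		img, ok := m.(manifest.Imager)
		if !ok {
			return "", false
		}
		d, err := img.GetConfig()
		if errors.Is(err, errs.ErrUnsupportedMediaType) {
			return "", false // docker schema1: no config descriptor exists
		}
		if err != nil {
			return "", false // e.g. errs.ErrManifestNotSet before a ManifestGet
		}
		return d.Digest, true
	}
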
- orig.MediaType = types.MediaTypeDocker1Manifest + orig.MediaType = mediatype.Docker1Manifest } mj, err := json.Marshal(orig) if err != nil { @@ -190,8 +201,8 @@ func (m *docker1Manifest) SetOrig(origIn interface{}) error { } m.manifSet = true m.rawBody = mj - m.desc = types.Descriptor{ - MediaType: types.MediaTypeDocker1Manifest, + m.desc = descriptor.Descriptor{ + MediaType: mediatype.Docker1Manifest, Digest: digest.FromBytes(mj), Size: int64(len(mj)), } @@ -203,11 +214,11 @@ func (m *docker1Manifest) SetOrig(origIn interface{}) error { func (m *docker1SignedManifest) SetOrig(origIn interface{}) error { orig, ok := origIn.(schema1.SignedManifest) if !ok { - return types.ErrUnsupportedMediaType + return errs.ErrUnsupportedMediaType } - if orig.MediaType != types.MediaTypeDocker1ManifestSigned { + if orig.MediaType != mediatype.Docker1ManifestSigned { // TODO: error? - orig.MediaType = types.MediaTypeDocker1ManifestSigned + orig.MediaType = mediatype.Docker1ManifestSigned } mj, err := json.Marshal(orig) if err != nil { @@ -215,8 +226,8 @@ func (m *docker1SignedManifest) SetOrig(origIn interface{}) error { } m.manifSet = true m.rawBody = mj - m.desc = types.Descriptor{ - MediaType: types.MediaTypeDocker1ManifestSigned, + m.desc = descriptor.Descriptor{ + MediaType: mediatype.Docker1ManifestSigned, Digest: digest.FromBytes(mj), Size: int64(len(mj)), } diff --git a/vendor/github.com/regclient/regclient/types/manifest/docker2.go b/vendor/github.com/regclient/regclient/types/manifest/docker2.go index 0a1783ca7..fcaf19f96 100644 --- a/vendor/github.com/regclient/regclient/types/manifest/docker2.go +++ b/vendor/github.com/regclient/regclient/types/manifest/docker2.go @@ -12,17 +12,20 @@ import ( _ "crypto/sha512" digest "github.com/opencontainers/go-digest" + "github.com/regclient/regclient/internal/units" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/descriptor" "github.com/regclient/regclient/types/docker/schema2" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" "github.com/regclient/regclient/types/platform" ) const ( // MediaTypeDocker2Manifest is the media type when pulling manifests from a v2 registry - MediaTypeDocker2Manifest = types.MediaTypeDocker2Manifest + MediaTypeDocker2Manifest = mediatype.Docker2Manifest // MediaTypeDocker2ManifestList is the media type when pulling a manifest list from a v2 registry - MediaTypeDocker2ManifestList = types.MediaTypeDocker2ManifestList + MediaTypeDocker2ManifestList = mediatype.Docker2ManifestList ) type docker2Manifest struct { @@ -36,53 +39,53 @@ type docker2ManifestList struct { func (m *docker2Manifest) GetAnnotations() (map[string]string, error) { if !m.manifSet { - return nil, types.ErrManifestNotSet + return nil, errs.ErrManifestNotSet } return m.Annotations, nil } -func (m *docker2Manifest) GetConfig() (types.Descriptor, error) { +func (m *docker2Manifest) GetConfig() (descriptor.Descriptor, error) { if !m.manifSet { - return types.Descriptor{}, types.ErrManifestNotSet + return descriptor.Descriptor{}, errs.ErrManifestNotSet } return m.Config, nil } func (m *docker2Manifest) GetConfigDigest() (digest.Digest, error) { if !m.manifSet { - return digest.Digest(""), types.ErrManifestNotSet + return digest.Digest(""), errs.ErrManifestNotSet } return m.Config.Digest, nil } func (m *docker2ManifestList) GetAnnotations() (map[string]string, error) { if !m.manifSet { - return nil, types.ErrManifestNotSet + return nil, errs.ErrManifestNotSet } return m.Annotations, nil 
} -func (m *docker2ManifestList) GetConfig() (types.Descriptor, error) { - return types.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *docker2ManifestList) GetConfig() (descriptor.Descriptor, error) { + return descriptor.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } func (m *docker2ManifestList) GetConfigDigest() (digest.Digest, error) { - return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) + return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *docker2Manifest) GetManifestList() ([]types.Descriptor, error) { - return []types.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *docker2Manifest) GetManifestList() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *docker2ManifestList) GetManifestList() ([]types.Descriptor, error) { +func (m *docker2ManifestList) GetManifestList() ([]descriptor.Descriptor, error) { if !m.manifSet { - return []types.Descriptor{}, types.ErrManifestNotSet + return []descriptor.Descriptor{}, errs.ErrManifestNotSet } return m.Manifests, nil } -func (m *docker2Manifest) GetLayers() ([]types.Descriptor, error) { +func (m *docker2Manifest) GetLayers() ([]descriptor.Descriptor, error) { if !m.manifSet { - return []types.Descriptor{}, types.ErrManifestNotSet + return []descriptor.Descriptor{}, errs.ErrManifestNotSet } return m.Layers, nil } -func (m *docker2ManifestList) GetLayers() ([]types.Descriptor, error) { - return []types.Descriptor{}, fmt.Errorf("layers are not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *docker2ManifestList) GetLayers() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("layers are not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } func (m *docker2Manifest) GetOrig() interface{} { @@ -92,15 +95,25 @@ func (m *docker2ManifestList) GetOrig() interface{} { return m.ManifestList } -func (m *docker2Manifest) GetPlatformDesc(p *platform.Platform) (*types.Descriptor, error) { - return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *docker2Manifest) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *docker2ManifestList) GetPlatformDesc(p *platform.Platform) (*types.Descriptor, error) { - return getPlatformDesc(p, m.Manifests) +func (m *docker2ManifestList) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + if p == nil { + return nil, fmt.Errorf("invalid input, platform is nil%.0w", errs.ErrNotFound) + } + d, err := descriptor.DescriptorListSearch(m.Manifests, descriptor.MatchOpt{Platform: p}) + if err != nil { + return nil, fmt.Errorf("platform not found: %s%.0w", *p, err) + } + return &d, nil } func (m *docker2Manifest) GetPlatformList() 
([]*platform.Platform, error) { - return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) + return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } func (m *docker2ManifestList) GetPlatformList() ([]*platform.Platform, error) { dl, err := m.GetManifestList() @@ -110,9 +123,21 @@ func (m *docker2ManifestList) GetPlatformList() ([]*platform.Platform, error) { return getPlatformList(dl) } +// GetSize returns the size in bytes of all layers +func (m *docker2Manifest) GetSize() (int64, error) { + if !m.manifSet { + return 0, errs.ErrManifestNotSet + } + var total int64 + for _, d := range m.Layers { + total += d.Size + } + return total, nil +} + func (m *docker2Manifest) MarshalJSON() ([]byte, error) { if !m.manifSet { - return []byte{}, types.ErrManifestNotSet + return []byte{}, errs.ErrManifestNotSet } if len(m.rawBody) > 0 { return m.rawBody, nil @@ -121,7 +146,7 @@ func (m *docker2Manifest) MarshalJSON() ([]byte, error) { } func (m *docker2ManifestList) MarshalJSON() ([]byte, error) { if !m.manifSet { - return []byte{}, types.ErrManifestNotSet + return []byte{}, errs.ErrManifestNotSet } if len(m.rawBody) > 0 { return m.rawBody, nil @@ -172,8 +197,8 @@ func (m *docker2Manifest) MarshalPretty() ([]byte, error) { return []byte{}, err } } - tw.Flush() - return buf.Bytes(), nil + err = tw.Flush() + return buf.Bytes(), err } func (m *docker2ManifestList) MarshalPretty() ([]byte, error) { if m == nil { @@ -212,50 +237,58 @@ func (m *docker2ManifestList) MarshalPretty() ([]byte, error) { return []byte{}, err } } - tw.Flush() - return buf.Bytes(), nil + err := tw.Flush() + return buf.Bytes(), err } func (m *docker2Manifest) SetAnnotation(key, val string) error { if !m.manifSet { - return types.ErrManifestNotSet + return errs.ErrManifestNotSet } if m.Annotations == nil { m.Annotations = map[string]string{} } - m.Annotations[key] = val + if val != "" { + m.Annotations[key] = val + } else { + delete(m.Annotations, key) + } return m.updateDesc() } func (m *docker2ManifestList) SetAnnotation(key, val string) error { if !m.manifSet { - return types.ErrManifestNotSet + return errs.ErrManifestNotSet } if m.Annotations == nil { m.Annotations = map[string]string{} } - m.Annotations[key] = val + if val != "" { + m.Annotations[key] = val + } else { + delete(m.Annotations, key) + } return m.updateDesc() } -func (m *docker2Manifest) SetConfig(d types.Descriptor) error { +func (m *docker2Manifest) SetConfig(d descriptor.Descriptor) error { if !m.manifSet { - return types.ErrManifestNotSet + return errs.ErrManifestNotSet } m.Config = d return m.updateDesc() } -func (m *docker2Manifest) SetLayers(dl []types.Descriptor) error { +func (m *docker2Manifest) SetLayers(dl []descriptor.Descriptor) error { if !m.manifSet { - return types.ErrManifestNotSet + return errs.ErrManifestNotSet } m.Layers = dl return m.updateDesc() } -func (m *docker2ManifestList) SetManifestList(dl []types.Descriptor) error { +func (m *docker2ManifestList) SetManifestList(dl []descriptor.Descriptor) error { if !m.manifSet { - return types.ErrManifestNotSet + return errs.ErrManifestNotSet } m.Manifests = dl return m.updateDesc() @@ -264,11 +297,11 @@ func (m *docker2ManifestList) SetManifestList(dl []types.Descriptor) error { func (m *docker2Manifest) SetOrig(origIn interface{}) error { orig, ok := origIn.(schema2.Manifest) if !ok { - return types.ErrUnsupportedMediaType + return errs.ErrUnsupportedMediaType } - 
if orig.MediaType != types.MediaTypeDocker2Manifest { + if orig.MediaType != mediatype.Docker2Manifest { // TODO: error? - orig.MediaType = types.MediaTypeDocker2Manifest + orig.MediaType = mediatype.Docker2Manifest } m.manifSet = true m.Manifest = orig @@ -278,11 +311,11 @@ func (m *docker2Manifest) SetOrig(origIn interface{}) error { func (m *docker2ManifestList) SetOrig(origIn interface{}) error { orig, ok := origIn.(schema2.ManifestList) if !ok { - return types.ErrUnsupportedMediaType + return errs.ErrUnsupportedMediaType } - if orig.MediaType != types.MediaTypeDocker2ManifestList { + if orig.MediaType != mediatype.Docker2ManifestList { // TODO: error? - orig.MediaType = types.MediaTypeDocker2ManifestList + orig.MediaType = mediatype.Docker2ManifestList } m.manifSet = true m.ManifestList = orig @@ -295,8 +328,8 @@ func (m *docker2Manifest) updateDesc() error { return err } m.rawBody = mj - m.desc = types.Descriptor{ - MediaType: types.MediaTypeDocker2Manifest, + m.desc = descriptor.Descriptor{ + MediaType: mediatype.Docker2Manifest, Digest: digest.FromBytes(mj), Size: int64(len(mj)), } @@ -308,8 +341,8 @@ func (m *docker2ManifestList) updateDesc() error { return err } m.rawBody = mj - m.desc = types.Descriptor{ - MediaType: types.MediaTypeDocker2ManifestList, + m.desc = descriptor.Descriptor{ + MediaType: mediatype.Docker2ManifestList, Digest: digest.FromBytes(mj), Size: int64(len(mj)), } diff --git a/vendor/github.com/regclient/regclient/types/manifest/manifest.go b/vendor/github.com/regclient/regclient/types/manifest/manifest.go index ee203e4d8..aae2cc4f2 100644 --- a/vendor/github.com/regclient/regclient/types/manifest/manifest.go +++ b/vendor/github.com/regclient/regclient/types/manifest/manifest.go @@ -10,14 +10,18 @@ import ( "strconv" "strings" - // crypto libraries included for go-digest + // Crypto libraries are included for go-digest. _ "crypto/sha256" _ "crypto/sha512" digest "github.com/opencontainers/go-digest" + "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/descriptor" "github.com/regclient/regclient/types/docker/schema1" "github.com/regclient/regclient/types/docker/schema2" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" v1 "github.com/regclient/regclient/types/oci/v1" "github.com/regclient/regclient/types/platform" "github.com/regclient/regclient/types/ref" @@ -26,7 +30,7 @@ import ( // Manifest interface is implemented by all supported manifests but // many calls are only supported by certain underlying media types. type Manifest interface { - GetDescriptor() types.Descriptor + GetDescriptor() descriptor.Descriptor GetOrig() interface{} GetRef() ref.Ref IsList() bool @@ -36,62 +40,68 @@ type Manifest interface { RawHeaders() (http.Header, error) SetOrig(interface{}) error - // Deprecated: GetConfig should be accessed using Imager interface - GetConfig() (types.Descriptor, error) - // Deprecated: GetLayers should be accessed using Imager interface - GetLayers() ([]types.Descriptor, error) + // Deprecated: GetConfig should be accessed using [Imager] interface. + GetConfig() (descriptor.Descriptor, error) + // Deprecated: GetLayers should be accessed using [Imager] interface. + GetLayers() ([]descriptor.Descriptor, error) - // Deprecated: GetManifestList should be accessed using Indexer interface - GetManifestList() ([]types.Descriptor, error) + // Deprecated: GetManifestList should be accessed using [Indexer] interface. 
+ GetManifestList() ([]descriptor.Descriptor, error) - // Deprecated: GetConfigDigest should be replaced with GetConfig + // Deprecated: GetConfigDigest should be replaced with [GetConfig]. GetConfigDigest() (digest.Digest, error) - // Deprecated: GetDigest should be replaced with GetDescriptor().Digest + // Deprecated: GetDigest should be replaced with GetDescriptor().Digest, see [GetDescriptor]. GetDigest() digest.Digest - // Deprecated: GetMediaType should be replaced with GetDescriptor().MediaType + // Deprecated: GetMediaType should be replaced with GetDescriptor().MediaType, see [GetDescriptor]. GetMediaType() string - // Deprecated: GetPlatformDesc method should be replaced with manifest.GetPlatformDesc function - GetPlatformDesc(p *platform.Platform) (*types.Descriptor, error) - // Deprecated: GetPlatformList method should be replaced with manifest.GetPlatformList function + // Deprecated: GetPlatformDesc method should be replaced with [manifest.GetPlatformDesc]. + GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) + // Deprecated: GetPlatformList method should be replaced with [manifest.GetPlatformList]. GetPlatformList() ([]*platform.Platform, error) - // Deprecated: GetRateLimit method should be replaced with manifest.GetRateLimit function + // Deprecated: GetRateLimit method should be replaced with [manifest.GetRateLimit]. GetRateLimit() types.RateLimit - // Deprecated: HasRateLimit method should be replaced with manifest.HasRateLimit function + // Deprecated: HasRateLimit method should be replaced with [manifest.HasRateLimit]. HasRateLimit() bool } +// Annotator is used by manifests that support annotations. +// Note this will work for Docker manifests despite the spec not officially supporting it. type Annotator interface { GetAnnotations() (map[string]string, error) SetAnnotation(key, val string) error } +// Indexer is used by manifests that contain a manifest list. type Indexer interface { - GetManifestList() ([]types.Descriptor, error) - SetManifestList(dl []types.Descriptor) error + GetManifestList() ([]descriptor.Descriptor, error) + SetManifestList(dl []descriptor.Descriptor) error } +// Imager is used by manifests packaging an image. type Imager interface { - GetConfig() (types.Descriptor, error) - GetLayers() ([]types.Descriptor, error) - SetConfig(d types.Descriptor) error - SetLayers(dl []types.Descriptor) error + GetConfig() (descriptor.Descriptor, error) + GetLayers() ([]descriptor.Descriptor, error) + SetConfig(d descriptor.Descriptor) error + SetLayers(dl []descriptor.Descriptor) error + GetSize() (int64, error) } +// Subjecter is used by manifests that may have a subject field. type Subjecter interface { - GetSubject() (*types.Descriptor, error) - SetSubject(d *types.Descriptor) error + GetSubject() (*descriptor.Descriptor, error) + SetSubject(d *descriptor.Descriptor) error } type manifestConfig struct { r ref.Ref - desc types.Descriptor + desc descriptor.Descriptor raw []byte orig interface{} header http.Header } type Opts func(*manifestConfig) -// New creates a new manifest based on provided options +// New creates a new manifest based on provided options. 
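
With the Indexer and Imager interfaces documented above, the intended consumption pattern is a type assertion rather than the deprecated methods on Manifest. A sketch of assumed usage, including the new GetSize, which sums the layer (or blob) descriptor sizes:

	package example

	import (
		"fmt"

		"github.com/regclient/regclient/types/manifest"
	)

	// describe is a hypothetical helper distinguishing indexes from images.
	func describe(m manifest.Manifest) {
		if idx, ok := m.(manifest.Indexer); ok {
			if dl, err := idx.GetManifestList(); err == nil {
				fmt.Printf("index with %d entries\n", len(dl))
			}
			return
		}
		if img, ok := m.(manifest.Imager); ok {
			if size, err := img.GetSize(); err == nil {
				fmt.Printf("image layers total %d bytes\n", size)
			}
		}
	}
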
func New(opts ...Opts) (Manifest, error) { mc := manifestConfig{} for _, opt := range opts { @@ -106,7 +116,7 @@ func New(opts ...Opts) (Manifest, error) { // extract fields from header where available if mc.header != nil { if c.desc.MediaType == "" { - c.desc.MediaType = mc.header.Get("Content-Type") + c.desc.MediaType = mediatype.Base(mc.header.Get("Content-Type")) } if c.desc.Size == 0 { cl, _ := strconv.Atoi(mc.header.Get("Content-Length")) @@ -123,63 +133,70 @@ func New(opts ...Opts) (Manifest, error) { return fromCommon(c) } -// WithDesc specifies the descriptor for the manifest -func WithDesc(desc types.Descriptor) Opts { +// WithDesc specifies the descriptor for the manifest. +func WithDesc(desc descriptor.Descriptor) Opts { return func(mc *manifestConfig) { mc.desc = desc } } -// WithHeader provides the headers from the response when pulling the manifest +// WithHeader provides the headers from the response when pulling the manifest. func WithHeader(header http.Header) Opts { return func(mc *manifestConfig) { mc.header = header } } -// WithOrig provides the original manifest variable +// WithOrig provides the original manifest variable. func WithOrig(orig interface{}) Opts { return func(mc *manifestConfig) { mc.orig = orig } } -// WithRaw provides the manifest bytes or HTTP response body +// WithRaw provides the manifest bytes or HTTP response body. func WithRaw(raw []byte) Opts { return func(mc *manifestConfig) { mc.raw = raw } } -// WithRef provides the reference used to get the manifest +// WithRef provides the reference used to get the manifest. func WithRef(r ref.Ref) Opts { return func(mc *manifestConfig) { mc.r = r } } -// GetDigest returns the digest from the manifest descriptor +// GetDigest returns the digest from the manifest descriptor. func GetDigest(m Manifest) digest.Digest { d := m.GetDescriptor() return d.Digest } -// GetMediaType returns the media type from the manifest descriptor +// GetMediaType returns the media type from the manifest descriptor. func GetMediaType(m Manifest) string { d := m.GetDescriptor() return d.MediaType } -// GetPlatformDesc returns the descriptor for a specific platform from an index -func GetPlatformDesc(m Manifest, p *platform.Platform) (*types.Descriptor, error) { +// GetPlatformDesc returns the descriptor for a specific platform from an index. +func GetPlatformDesc(m Manifest, p *platform.Platform) (*descriptor.Descriptor, error) { dl, err := m.GetManifestList() if err != nil { return nil, err } - return getPlatformDesc(p, dl) + if p == nil { + return nil, fmt.Errorf("invalid input, platform is nil%.0w", errs.ErrNotFound) + } + d, err := descriptor.DescriptorListSearch(dl, descriptor.MatchOpt{Platform: p}) + if err != nil { + return nil, fmt.Errorf("platform not found: %s%.0w", *p, err) + } + return &d, nil } -// GetPlatformList returns the list of platforms from an index +// GetPlatformList returns the list of platforms from an index. func GetPlatformList(m Manifest) ([]*platform.Platform, error) { dl, err := m.GetManifestList() if err != nil { @@ -194,7 +211,7 @@ func GetPlatformList(m Manifest) ([]*platform.Platform, error) { return l, nil } -// GetRateLimit returns the current rate limit seen in headers +// GetRateLimit returns the current rate limit seen in headers. 
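
GetPlatformDesc above now delegates to descriptor.DescriptorListSearch, which is intended to preserve the compatible-platform fallback of the removed getPlatformDesc helper (so e.g. a Mac can still resolve Linux images). A sketch of resolving one entry from an index; platform.Parse and the errs.ErrNotFound result are assumptions based on the rest of the package:

	package example

	import (
		"errors"
		"fmt"

		"github.com/regclient/regclient/types/descriptor"
		"github.com/regclient/regclient/types/errs"
		"github.com/regclient/regclient/types/manifest"
		"github.com/regclient/regclient/types/platform"
	)

	func resolveArm64(m manifest.Manifest) (*descriptor.Descriptor, error) {
		p, err := platform.Parse("linux/arm64")
		if err != nil {
			return nil, err
		}
		d, err := manifest.GetPlatformDesc(m, &p)
		if errors.Is(err, errs.ErrNotFound) {
			return nil, fmt.Errorf("no matching or compatible linux/arm64 entry: %w", err)
		}
		return d, err
	}
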
func GetRateLimit(m Manifest) types.RateLimit { rl := types.RateLimit{} header, err := m.RawHeaders() @@ -241,16 +258,17 @@ func GetRateLimit(m Manifest) types.RateLimit { return rl } -// HasRateLimit indicates whether the rate limit is set and available +// HasRateLimit indicates whether the rate limit is set and available. func HasRateLimit(m Manifest) bool { rl := GetRateLimit(m) return rl.Set } +// OCIIndexFromAny converts manifest lists to an OCI index. func OCIIndexFromAny(orig interface{}) (v1.Index, error) { ociI := v1.Index{ Versioned: v1.IndexSchemaVersion, - MediaType: types.MediaTypeOCI1ManifestList, + MediaType: mediatype.OCI1ManifestList, } switch orig := orig.(type) { case schema2.ManifestList: @@ -264,6 +282,7 @@ func OCIIndexFromAny(orig interface{}) (v1.Index, error) { return ociI, nil } +// OCIIndexToAny converts from an OCI index back to the manifest list. func OCIIndexToAny(ociI v1.Index, origP interface{}) error { // reflect is used to handle both *interface{} and *Manifest rv := reflect.ValueOf(origP) @@ -291,10 +310,11 @@ func OCIIndexToAny(ociI v1.Index, origP interface{}) error { return nil } +// OCIManifestFromAny converts an image manifest to an OCI manifest. func OCIManifestFromAny(orig interface{}) (v1.Manifest, error) { ociM := v1.Manifest{ Versioned: v1.ManifestSchemaVersion, - MediaType: types.MediaTypeOCI1Manifest, + MediaType: mediatype.OCI1Manifest, } switch orig := orig.(type) { case schema2.Manifest: @@ -310,6 +330,7 @@ func OCIManifestFromAny(orig interface{}) (v1.Manifest, error) { return ociM, nil } +// OCIManifestToAny converts an OCI manifest back to the image manifest. func OCIManifestToAny(ociM v1.Manifest, origP interface{}) error { // reflect is used to handle both *interface{} and *Manifest rv := reflect.ValueOf(origP) @@ -364,14 +385,14 @@ func fromOrig(c common, orig interface{}) (Manifest, error) { switch mOrig := orig.(type) { case schema1.Manifest: mt = mOrig.MediaType - c.desc.MediaType = types.MediaTypeDocker1Manifest + c.desc.MediaType = mediatype.Docker1Manifest m = &docker1Manifest{ common: c, Manifest: mOrig, } case schema1.SignedManifest: mt = mOrig.MediaType - c.desc.MediaType = types.MediaTypeDocker1ManifestSigned + c.desc.MediaType = mediatype.Docker1ManifestSigned // recompute digest on the canonical data c.desc.Digest = digest.FromBytes(mOrig.Canonical) m = &docker1SignedManifest{ @@ -380,35 +401,35 @@ func fromOrig(c common, orig interface{}) (Manifest, error) { } case schema2.Manifest: mt = mOrig.MediaType - c.desc.MediaType = types.MediaTypeDocker2Manifest + c.desc.MediaType = mediatype.Docker2Manifest m = &docker2Manifest{ common: c, Manifest: mOrig, } case schema2.ManifestList: mt = mOrig.MediaType - c.desc.MediaType = types.MediaTypeDocker2ManifestList + c.desc.MediaType = mediatype.Docker2ManifestList m = &docker2ManifestList{ common: c, ManifestList: mOrig, } case v1.Manifest: mt = mOrig.MediaType - c.desc.MediaType = types.MediaTypeOCI1Manifest + c.desc.MediaType = mediatype.OCI1Manifest m = &oci1Manifest{ common: c, Manifest: mOrig, } case v1.Index: mt = mOrig.MediaType - c.desc.MediaType = types.MediaTypeOCI1ManifestList + c.desc.MediaType = mediatype.OCI1ManifestList m = &oci1Index{ common: c, Index: orig.(v1.Index), } case v1.ArtifactManifest: mt = mOrig.MediaType - c.desc.MediaType = types.MediaTypeOCI1Artifact + c.desc.MediaType = mediatype.OCI1Artifact m = &oci1Artifact{ common: c, ArtifactManifest: mOrig, @@ -428,6 +449,7 @@ func fromOrig(c common, orig interface{}) (Manifest, error) { return m, nil } +// 
fromCommon is used to create a manifest when the underlying manifest struct is not provided. func fromCommon(c common) (Manifest, error) { var err error var m Manifest @@ -436,38 +458,52 @@ func fromCommon(c common) (Manifest, error) { // extract common data from from rawBody if len(c.rawBody) > 0 { c.manifSet = true - // extract media type from body if needed + // extract media type from body, either explicitly or with duck typing if c.desc.MediaType == "" { mt := struct { - MediaType string `json:"mediaType,omitempty"` - SchemaVersion int `json:"schemaVersion,omitempty"` - Signatures []interface{} `json:"signatures,omitempty"` + MediaType string `json:"mediaType,omitempty"` + SchemaVersion int `json:"schemaVersion,omitempty"` + Signatures []interface{} `json:"signatures,omitempty"` + Manifests []descriptor.Descriptor `json:"manifests,omitempty"` + Layers []descriptor.Descriptor `json:"layers,omitempty"` }{} err = json.Unmarshal(c.rawBody, &mt) if mt.MediaType != "" { c.desc.MediaType = mt.MediaType } else if mt.SchemaVersion == 1 && len(mt.Signatures) > 0 { - c.desc.MediaType = types.MediaTypeDocker1ManifestSigned + c.desc.MediaType = mediatype.Docker1ManifestSigned } else if mt.SchemaVersion == 1 { - c.desc.MediaType = types.MediaTypeDocker1Manifest + c.desc.MediaType = mediatype.Docker1Manifest + } else if len(mt.Manifests) > 0 { + if strings.HasPrefix(mt.Manifests[0].MediaType, "application/vnd.docker.") { + c.desc.MediaType = mediatype.Docker2ManifestList + } else { + c.desc.MediaType = mediatype.OCI1ManifestList + } + } else if len(mt.Layers) > 0 { + if strings.HasPrefix(mt.Layers[0].MediaType, "application/vnd.docker.") { + c.desc.MediaType = mediatype.Docker2Manifest + } else { + c.desc.MediaType = mediatype.OCI1Manifest + } } } // compute digest - if c.desc.MediaType != types.MediaTypeDocker1ManifestSigned { + if c.desc.MediaType != mediatype.Docker1ManifestSigned { d := digest.FromBytes(c.rawBody) c.desc.Digest = d c.desc.Size = int64(len(c.rawBody)) } } switch c.desc.MediaType { - case types.MediaTypeDocker1Manifest: + case mediatype.Docker1Manifest: var mOrig schema1.Manifest if len(c.rawBody) > 0 { err = json.Unmarshal(c.rawBody, &mOrig) mt = mOrig.MediaType } m = &docker1Manifest{common: c, Manifest: mOrig} - case types.MediaTypeDocker1ManifestSigned: + case mediatype.Docker1ManifestSigned: var mOrig schema1.SignedManifest if len(c.rawBody) > 0 { err = json.Unmarshal(c.rawBody, &mOrig) @@ -477,35 +513,35 @@ func fromCommon(c common) (Manifest, error) { c.desc.Size = int64(len(mOrig.Canonical)) } m = &docker1SignedManifest{common: c, SignedManifest: mOrig} - case types.MediaTypeDocker2Manifest: + case mediatype.Docker2Manifest: var mOrig schema2.Manifest if len(c.rawBody) > 0 { err = json.Unmarshal(c.rawBody, &mOrig) mt = mOrig.MediaType } m = &docker2Manifest{common: c, Manifest: mOrig} - case types.MediaTypeDocker2ManifestList: + case mediatype.Docker2ManifestList: var mOrig schema2.ManifestList if len(c.rawBody) > 0 { err = json.Unmarshal(c.rawBody, &mOrig) mt = mOrig.MediaType } m = &docker2ManifestList{common: c, ManifestList: mOrig} - case types.MediaTypeOCI1Manifest: + case mediatype.OCI1Manifest: var mOrig v1.Manifest if len(c.rawBody) > 0 { err = json.Unmarshal(c.rawBody, &mOrig) mt = mOrig.MediaType } m = &oci1Manifest{common: c, Manifest: mOrig} - case types.MediaTypeOCI1ManifestList: + case mediatype.OCI1ManifestList: var mOrig v1.Index if len(c.rawBody) > 0 { err = json.Unmarshal(c.rawBody, &mOrig) mt = mOrig.MediaType } m = &oci1Index{common: c, Index: mOrig} - 
case types.MediaTypeOCI1Artifact: + case mediatype.OCI1Artifact: var mOrig v1.ArtifactManifest if len(c.rawBody) > 0 { err = json.Unmarshal(c.rawBody, &mOrig) @@ -513,7 +549,7 @@ func fromCommon(c common) (Manifest, error) { } m = &oci1Artifact{common: c, ArtifactManifest: mOrig} default: - return nil, fmt.Errorf("%w: \"%s\"", types.ErrUnsupportedMediaType, c.desc.MediaType) + return nil, fmt.Errorf("%w: \"%s\"", errs.ErrUnsupportedMediaType, c.desc.MediaType) } if err != nil { return nil, fmt.Errorf("error unmarshaling manifest for %s: %w", c.r.CommonName(), err) @@ -537,25 +573,7 @@ func verifyMT(expected, received string) error { return nil } -func getPlatformDesc(p *platform.Platform, dl []types.Descriptor) (*types.Descriptor, error) { - if p == nil { - return nil, fmt.Errorf("invalid input, platform is nil%.0w", types.ErrNotFound) - } - for _, d := range dl { - if d.Platform != nil && platform.Match(*p, *d.Platform) { - return &d, nil - } - } - // if no platforms match, fall back to searching for a compatible platform (Mac runs Linux images) - for _, d := range dl { - if d.Platform != nil && platform.Compatible(*p, *d.Platform) { - return &d, nil - } - } - return nil, fmt.Errorf("platform not found: %s%.0w", *p, types.ErrNotFound) -} - -func getPlatformList(dl []types.Descriptor) ([]*platform.Platform, error) { +func getPlatformList(dl []descriptor.Descriptor) ([]*platform.Platform, error) { var l []*platform.Platform for _, d := range dl { if d.Platform != nil { diff --git a/vendor/github.com/regclient/regclient/types/manifest/oci1.go b/vendor/github.com/regclient/regclient/types/manifest/oci1.go index 121f07e99..8d92f0e81 100644 --- a/vendor/github.com/regclient/regclient/types/manifest/oci1.go +++ b/vendor/github.com/regclient/regclient/types/manifest/oci1.go @@ -12,17 +12,20 @@ import ( _ "crypto/sha512" digest "github.com/opencontainers/go-digest" + "github.com/regclient/regclient/internal/units" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" v1 "github.com/regclient/regclient/types/oci/v1" "github.com/regclient/regclient/types/platform" ) const ( // MediaTypeOCI1Manifest OCI v1 manifest media type - MediaTypeOCI1Manifest = types.MediaTypeOCI1Manifest + MediaTypeOCI1Manifest = mediatype.OCI1Manifest // MediaTypeOCI1ManifestList OCI v1 manifest list media type - MediaTypeOCI1ManifestList = types.MediaTypeOCI1ManifestList + MediaTypeOCI1ManifestList = mediatype.OCI1ManifestList ) type oci1Manifest struct { @@ -42,72 +45,72 @@ type oci1Artifact struct { func (m *oci1Manifest) GetAnnotations() (map[string]string, error) { if !m.manifSet { - return nil, types.ErrManifestNotSet + return nil, errs.ErrManifestNotSet } return m.Annotations, nil } -func (m *oci1Manifest) GetConfig() (types.Descriptor, error) { +func (m *oci1Manifest) GetConfig() (descriptor.Descriptor, error) { if !m.manifSet { - return types.Descriptor{}, types.ErrManifestNotSet + return descriptor.Descriptor{}, errs.ErrManifestNotSet } return m.Config, nil } func (m *oci1Manifest) GetConfigDigest() (digest.Digest, error) { if !m.manifSet { - return digest.Digest(""), types.ErrManifestNotSet + return digest.Digest(""), errs.ErrManifestNotSet } return m.Config.Digest, nil } func (m *oci1Index) GetAnnotations() (map[string]string, error) { if !m.manifSet { - return nil, types.ErrManifestNotSet + return nil, errs.ErrManifestNotSet } return m.Annotations, nil } -func (m *oci1Index) 
GetConfig() (types.Descriptor, error) { - return types.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *oci1Index) GetConfig() (descriptor.Descriptor, error) { + return descriptor.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } func (m *oci1Index) GetConfigDigest() (digest.Digest, error) { - return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) + return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } func (m *oci1Artifact) GetAnnotations() (map[string]string, error) { if !m.manifSet { - return nil, types.ErrManifestNotSet + return nil, errs.ErrManifestNotSet } return m.Annotations, nil } -func (m *oci1Artifact) GetConfig() (types.Descriptor, error) { - return types.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *oci1Artifact) GetConfig() (descriptor.Descriptor, error) { + return descriptor.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } func (m *oci1Artifact) GetConfigDigest() (digest.Digest, error) { - return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) + return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *oci1Manifest) GetManifestList() ([]types.Descriptor, error) { - return []types.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *oci1Manifest) GetManifestList() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *oci1Index) GetManifestList() ([]types.Descriptor, error) { +func (m *oci1Index) GetManifestList() ([]descriptor.Descriptor, error) { if !m.manifSet { - return nil, types.ErrManifestNotSet + return nil, errs.ErrManifestNotSet } return m.Manifests, nil } -func (m *oci1Artifact) GetManifestList() ([]types.Descriptor, error) { - return []types.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *oci1Artifact) GetManifestList() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *oci1Manifest) GetLayers() ([]types.Descriptor, error) { +func (m *oci1Manifest) GetLayers() ([]descriptor.Descriptor, error) { if !m.manifSet { - return nil, types.ErrManifestNotSet + return nil, errs.ErrManifestNotSet } return m.Layers, nil } -func (m *oci1Index) GetLayers() ([]types.Descriptor, error) { - return []types.Descriptor{}, fmt.Errorf("layers are not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *oci1Index) GetLayers() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("layers are not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *oci1Artifact) GetLayers() 
([]types.Descriptor, error) { +func (m *oci1Artifact) GetLayers() ([]descriptor.Descriptor, error) { if !m.manifSet { - return nil, types.ErrManifestNotSet + return nil, errs.ErrManifestNotSet } return m.Blobs, nil } @@ -122,22 +125,28 @@ func (m *oci1Artifact) GetOrig() interface{} { return m.ArtifactManifest } -func (m *oci1Manifest) GetPlatformDesc(p *platform.Platform) (*types.Descriptor, error) { - return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *oci1Manifest) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *oci1Index) GetPlatformDesc(p *platform.Platform) (*types.Descriptor, error) { - dl, err := m.GetManifestList() +func (m *oci1Index) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + if p == nil { + return nil, fmt.Errorf("invalid input, platform is nil%.0w", errs.ErrNotFound) + } + d, err := descriptor.DescriptorListSearch(m.Manifests, descriptor.MatchOpt{Platform: p}) if err != nil { - return nil, err + return nil, fmt.Errorf("platform not found: %s%.0w", *p, err) } - return getPlatformDesc(p, dl) + return &d, nil } -func (m *oci1Artifact) GetPlatformDesc(p *platform.Platform) (*types.Descriptor, error) { - return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *oci1Artifact) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } func (m *oci1Manifest) GetPlatformList() ([]*platform.Platform, error) { - return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) + return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } func (m *oci1Index) GetPlatformList() ([]*platform.Platform, error) { dl, err := m.GetManifestList() @@ -147,12 +156,12 @@ func (m *oci1Index) GetPlatformList() ([]*platform.Platform, error) { return getPlatformList(dl) } func (m *oci1Artifact) GetPlatformList() ([]*platform.Platform, error) { - return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) + return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } func (m *oci1Manifest) MarshalJSON() ([]byte, error) { if !m.manifSet { - return []byte{}, types.ErrManifestNotSet + return []byte{}, errs.ErrManifestNotSet } if len(m.rawBody) > 0 { @@ -161,30 +170,28 @@ func (m *oci1Manifest) MarshalJSON() ([]byte, error) { return json.Marshal((m.Manifest)) } -func (m *oci1Manifest) GetSubject() (*types.Descriptor, error) { +func (m *oci1Manifest) GetSubject() (*descriptor.Descriptor, error) { if !m.manifSet { - return nil, types.ErrManifestNotSet - } - // TODO: remove fall back support for refers in a future release - if m.Manifest.Subject == nil && m.Manifest.Refers != nil { - return m.Manifest.Refers, nil + return nil, errs.ErrManifestNotSet } return m.Manifest.Subject, nil } -func (m *oci1Artifact) GetSubject() (*types.Descriptor, error) { +func (m *oci1Index) GetSubject() (*descriptor.Descriptor, 
error) { if !m.manifSet { - return nil, types.ErrManifestNotSet + return nil, errs.ErrManifestNotSet } - // TODO: remove fall back support for refers in a future release - if m.ArtifactManifest.Subject == nil && m.ArtifactManifest.Refers != nil { - return m.ArtifactManifest.Refers, nil + return m.Index.Subject, nil +} +func (m *oci1Artifact) GetSubject() (*descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet } return m.ArtifactManifest.Subject, nil } func (m *oci1Index) MarshalJSON() ([]byte, error) { if !m.manifSet { - return []byte{}, types.ErrManifestNotSet + return []byte{}, errs.ErrManifestNotSet } if len(m.rawBody) > 0 { @@ -195,7 +202,7 @@ func (m *oci1Index) MarshalJSON() ([]byte, error) { } func (m *oci1Artifact) MarshalJSON() ([]byte, error) { if !m.manifSet { - return []byte{}, types.ErrManifestNotSet + return []byte{}, errs.ErrManifestNotSet } if len(m.rawBody) > 0 { @@ -215,6 +222,9 @@ func (m *oci1Manifest) MarshalPretty() ([]byte, error) { fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference) } fmt.Fprintf(tw, "MediaType:\t%s\n", m.desc.MediaType) + if m.ArtifactType != "" { + fmt.Fprintf(tw, "ArtifactType:\t%s\n", m.ArtifactType) + } fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String()) if m.Annotations != nil && len(m.Annotations) > 0 { fmt.Fprintf(tw, "Annotations:\t\n") @@ -256,8 +266,8 @@ func (m *oci1Manifest) MarshalPretty() ([]byte, error) { return []byte{}, err } } - tw.Flush() - return buf.Bytes(), nil + err = tw.Flush() + return buf.Bytes(), err } func (m *oci1Index) MarshalPretty() ([]byte, error) { if m == nil { @@ -269,6 +279,9 @@ func (m *oci1Index) MarshalPretty() ([]byte, error) { fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference) } fmt.Fprintf(tw, "MediaType:\t%s\n", m.desc.MediaType) + if m.ArtifactType != "" { + fmt.Fprintf(tw, "ArtifactType:\t%s\n", m.ArtifactType) + } fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String()) if m.Annotations != nil && len(m.Annotations) > 0 { fmt.Fprintf(tw, "Annotations:\t\n") @@ -296,8 +309,16 @@ func (m *oci1Index) MarshalPretty() ([]byte, error) { return []byte{}, err } } - tw.Flush() - return buf.Bytes(), nil + if m.Subject != nil { + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Subject:\t\n") + err := m.Subject.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + err := tw.Flush() + return buf.Bytes(), err } func (m *oci1Artifact) MarshalPretty() ([]byte, error) { if m == nil { @@ -309,6 +330,7 @@ func (m *oci1Artifact) MarshalPretty() ([]byte, error) { fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference) } fmt.Fprintf(tw, "MediaType:\t%s\n", m.desc.MediaType) + fmt.Fprintf(tw, "ArtifactType:\t%s\n", m.ArtifactType) fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String()) if m.Annotations != nil && len(m.Annotations) > 0 { fmt.Fprintf(tw, "Annotations:\t\n") @@ -344,72 +366,108 @@ func (m *oci1Artifact) MarshalPretty() ([]byte, error) { return []byte{}, err } } - tw.Flush() - return buf.Bytes(), nil + err := tw.Flush() + return buf.Bytes(), err } func (m *oci1Manifest) SetAnnotation(key, val string) error { if !m.manifSet { - return types.ErrManifestNotSet + return errs.ErrManifestNotSet } if m.Annotations == nil { m.Annotations = map[string]string{} } - m.Annotations[key] = val + if val != "" { + m.Annotations[key] = val + } else { + delete(m.Annotations, key) + } return m.updateDesc() } func (m *oci1Index) SetAnnotation(key, val string) error { if !m.manifSet { - return types.ErrManifestNotSet + return errs.ErrManifestNotSet } if m.Annotations == nil { m.Annotations = 
map[string]string{} } - m.Annotations[key] = val + if val != "" { + m.Annotations[key] = val + } else { + delete(m.Annotations, key) + } return m.updateDesc() } func (m *oci1Artifact) SetAnnotation(key, val string) error { if !m.manifSet { - return types.ErrManifestNotSet + return errs.ErrManifestNotSet } if m.Annotations == nil { m.Annotations = map[string]string{} } - m.Annotations[key] = val + if val != "" { + m.Annotations[key] = val + } else { + delete(m.Annotations, key) + } return m.updateDesc() } -func (m *oci1Artifact) SetConfig(d types.Descriptor) error { - return fmt.Errorf("set config not available for media type %s%.0w", m.desc.MediaType, types.ErrUnsupportedMediaType) +func (m *oci1Artifact) SetConfig(d descriptor.Descriptor) error { + return fmt.Errorf("set config not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) } -func (m *oci1Manifest) SetConfig(d types.Descriptor) error { +func (m *oci1Manifest) SetConfig(d descriptor.Descriptor) error { if !m.manifSet { - return types.ErrManifestNotSet + return errs.ErrManifestNotSet } m.Config = d return m.updateDesc() } -func (m *oci1Artifact) SetLayers(dl []types.Descriptor) error { +func (m *oci1Artifact) SetLayers(dl []descriptor.Descriptor) error { if !m.manifSet { - return types.ErrManifestNotSet + return errs.ErrManifestNotSet } m.Blobs = dl return m.updateDesc() } -func (m *oci1Manifest) SetLayers(dl []types.Descriptor) error { +// GetSize returns the size in bytes of all layers +func (m *oci1Manifest) GetSize() (int64, error) { if !m.manifSet { - return types.ErrManifestNotSet + return 0, errs.ErrManifestNotSet + } + var total int64 + for _, d := range m.Layers { + total += d.Size + } + return total, nil +} + +// GetSize returns the size in bytes of all layers +func (m *oci1Artifact) GetSize() (int64, error) { + if !m.manifSet { + return 0, errs.ErrManifestNotSet + } + var total int64 + for _, d := range m.Blobs { + total += d.Size + } + return total, nil +} + +func (m *oci1Manifest) SetLayers(dl []descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet } m.Layers = dl return m.updateDesc() } -func (m *oci1Index) SetManifestList(dl []types.Descriptor) error { +func (m *oci1Index) SetManifestList(dl []descriptor.Descriptor) error { if !m.manifSet { - return types.ErrManifestNotSet + return errs.ErrManifestNotSet } m.Manifests = dl return m.updateDesc() @@ -418,11 +476,11 @@ func (m *oci1Index) SetManifestList(dl []types.Descriptor) error { func (m *oci1Manifest) SetOrig(origIn interface{}) error { orig, ok := origIn.(v1.Manifest) if !ok { - return types.ErrUnsupportedMediaType + return errs.ErrUnsupportedMediaType } - if orig.MediaType != types.MediaTypeOCI1Manifest { + if orig.MediaType != mediatype.OCI1Manifest { // TODO: error? - orig.MediaType = types.MediaTypeOCI1Manifest + orig.MediaType = mediatype.OCI1Manifest } m.manifSet = true m.Manifest = orig @@ -433,11 +491,11 @@ func (m *oci1Manifest) SetOrig(origIn interface{}) error { func (m *oci1Index) SetOrig(origIn interface{}) error { orig, ok := origIn.(v1.Index) if !ok { - return types.ErrUnsupportedMediaType + return errs.ErrUnsupportedMediaType } - if orig.MediaType != types.MediaTypeOCI1ManifestList { + if orig.MediaType != mediatype.OCI1ManifestList { // TODO: error? 
- orig.MediaType = types.MediaTypeOCI1ManifestList + orig.MediaType = mediatype.OCI1ManifestList } m.manifSet = true m.Index = orig @@ -445,29 +503,36 @@ func (m *oci1Index) SetOrig(origIn interface{}) error { return m.updateDesc() } -func (m *oci1Artifact) SetSubject(d *types.Descriptor) error { +func (m *oci1Artifact) SetSubject(d *descriptor.Descriptor) error { if !m.manifSet { - return types.ErrManifestNotSet + return errs.ErrManifestNotSet } m.ArtifactManifest.Subject = d return m.updateDesc() } -func (m *oci1Manifest) SetSubject(d *types.Descriptor) error { +func (m *oci1Manifest) SetSubject(d *descriptor.Descriptor) error { if !m.manifSet { - return types.ErrManifestNotSet + return errs.ErrManifestNotSet } m.Manifest.Subject = d return m.updateDesc() } +func (m *oci1Index) SetSubject(d *descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Index.Subject = d + return m.updateDesc() +} func (m *oci1Artifact) SetOrig(origIn interface{}) error { orig, ok := origIn.(v1.ArtifactManifest) if !ok { - return types.ErrUnsupportedMediaType + return errs.ErrUnsupportedMediaType } - if orig.MediaType != types.MediaTypeOCI1Artifact { + if orig.MediaType != mediatype.OCI1Artifact { // TODO: error? - orig.MediaType = types.MediaTypeOCI1Artifact + orig.MediaType = mediatype.OCI1Artifact } m.manifSet = true m.ArtifactManifest = orig @@ -481,8 +546,8 @@ func (m *oci1Manifest) updateDesc() error { return err } m.rawBody = mj - m.desc = types.Descriptor{ - MediaType: types.MediaTypeOCI1Manifest, + m.desc = descriptor.Descriptor{ + MediaType: mediatype.OCI1Manifest, Digest: digest.FromBytes(mj), Size: int64(len(mj)), } @@ -494,8 +559,8 @@ func (m *oci1Index) updateDesc() error { return err } m.rawBody = mj - m.desc = types.Descriptor{ - MediaType: types.MediaTypeOCI1ManifestList, + m.desc = descriptor.Descriptor{ + MediaType: mediatype.OCI1ManifestList, Digest: digest.FromBytes(mj), Size: int64(len(mj)), } @@ -507,8 +572,8 @@ func (m *oci1Artifact) updateDesc() error { return err } m.rawBody = mj - m.desc = types.Descriptor{ - MediaType: types.MediaTypeOCI1Artifact, + m.desc = descriptor.Descriptor{ + MediaType: mediatype.OCI1Artifact, Digest: digest.FromBytes(mj), Size: int64(len(mj)), } diff --git a/vendor/github.com/regclient/regclient/types/mediatype.go b/vendor/github.com/regclient/regclient/types/mediatype.go index 403ff4869..d80acde8d 100644 --- a/vendor/github.com/regclient/regclient/types/mediatype.go +++ b/vendor/github.com/regclient/regclient/types/mediatype.go @@ -1,40 +1,91 @@ package types +import ( + "github.com/regclient/regclient/types/mediatype" +) + const ( - // MediaTypeDocker1Manifest deprecated media type for docker schema1 manifests - MediaTypeDocker1Manifest = "application/vnd.docker.distribution.manifest.v1+json" - // MediaTypeDocker1ManifestSigned is a deprecated schema1 manifest with jws signing - MediaTypeDocker1ManifestSigned = "application/vnd.docker.distribution.manifest.v1+prettyjws" - // MediaTypeDocker2Manifest is the media type when pulling manifests from a v2 registry - MediaTypeDocker2Manifest = "application/vnd.docker.distribution.manifest.v2+json" - // MediaTypeDocker2ManifestList is the media type when pulling a manifest list from a v2 registry - MediaTypeDocker2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" - // MediaTypeDocker2ImageConfig is for the configuration json object media type - MediaTypeDocker2ImageConfig = "application/vnd.docker.container.image.v1+json" - // MediaTypeOCI1Artifact 
EXPERIMENTAL OCI v1 artifact media type - MediaTypeOCI1Artifact = "application/vnd.oci.artifact.manifest.v1+json" - // MediaTypeOCI1Manifest OCI v1 manifest media type - MediaTypeOCI1Manifest = "application/vnd.oci.image.manifest.v1+json" - // MediaTypeOCI1ManifestList OCI v1 manifest list media type - MediaTypeOCI1ManifestList = "application/vnd.oci.image.index.v1+json" - // MediaTypeOCI1ImageConfig OCI v1 configuration json object media type - MediaTypeOCI1ImageConfig = "application/vnd.oci.image.config.v1+json" - // MediaTypeDocker2LayerGzip is the default compressed layer for docker schema2 - MediaTypeDocker2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip" - // MediaTypeDocker2ForeignLayer is the default compressed layer for foreign layers in docker schema2 - MediaTypeDocker2ForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" - // MediaTypeOCI1Layer is the uncompressed layer for OCIv1 - MediaTypeOCI1Layer = "application/vnd.oci.image.layer.v1.tar" - // MediaTypeOCI1LayerGzip is the gzip compressed layer for OCI v1 - MediaTypeOCI1LayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" - // MediaTypeOCI1LayerZstd is the zstd compressed layer for OCI v1 - MediaTypeOCI1LayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" - // MediaTypeOCI1ForeignLayer is the foreign layer for OCI v1 - MediaTypeOCI1ForeignLayer = "application/vnd.oci.image.layer.nondistributable.v1.tar" - // MediaTypeOCI1ForeignLayerGzip is the gzip compressed foreign layer for OCI v1 - MediaTypeOCI1ForeignLayerGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" - // MediaTypeOCI1ForeignLayerZstd is the zstd compressed foreign layer for OCI v1 - MediaTypeOCI1ForeignLayerZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" - // MediaTypeBuildkitCacheConfig is used by buildkit cache images - MediaTypeBuildkitCacheConfig = "application/vnd.buildkit.cacheconfig.v0" + // MediaTypeDocker1Manifest deprecated media type for docker schema1 manifests. + // + // Deprecated: replace with [mediatype.Docker1Manifest]. + MediaTypeDocker1Manifest = mediatype.Docker1Manifest + // MediaTypeDocker1ManifestSigned is a deprecated schema1 manifest with jws signing. + // + // Deprecated: replace with [mediatype.Docker1ManifestSigned]. + MediaTypeDocker1ManifestSigned = mediatype.Docker1ManifestSigned + // MediaTypeDocker2Manifest is the media type when pulling manifests from a v2 registry. + // + // Deprecated: replace with [mediatype.Docker2Manifest]. + MediaTypeDocker2Manifest = mediatype.Docker2Manifest + // MediaTypeDocker2ManifestList is the media type when pulling a manifest list from a v2 registry. + // + // Deprecated: replace with [mediatype.Docker2ManifestList]. + MediaTypeDocker2ManifestList = mediatype.Docker2ManifestList + // MediaTypeDocker2ImageConfig is for the configuration json object media type. + // + // Deprecated: replace with [mediatype.Docker2ImageConfig]. + MediaTypeDocker2ImageConfig = mediatype.Docker2ImageConfig + // MediaTypeOCI1Artifact EXPERIMENTAL OCI v1 artifact media type. + // + // Deprecated: replace with [mediatype.OCI1Artifact]. + MediaTypeOCI1Artifact = mediatype.OCI1Artifact + // MediaTypeOCI1Manifest OCI v1 manifest media type. + // + // Deprecated: replace with [mediatype.OCI1Manifest]. + MediaTypeOCI1Manifest = mediatype.OCI1Manifest + // MediaTypeOCI1ManifestList OCI v1 manifest list media type. + // + // Deprecated: replace with [mediatype.OCI1ManifestList]. 
+ MediaTypeOCI1ManifestList = mediatype.OCI1ManifestList + // MediaTypeOCI1ImageConfig OCI v1 configuration json object media type. + // + // Deprecated: replace with [mediatype.OCI1ImageConfig]. + MediaTypeOCI1ImageConfig = mediatype.OCI1ImageConfig + // MediaTypeDocker2LayerGzip is the default compressed layer for docker schema2. + // + // Deprecated: replace with [mediatype.Docker2LayerGzip]. + MediaTypeDocker2LayerGzip = mediatype.Docker2LayerGzip + // MediaTypeDocker2ForeignLayer is the default compressed layer for foreign layers in docker schema2. + // + // Deprecated: replace with [mediatype.Docker2ForeignLayer]. + MediaTypeDocker2ForeignLayer = mediatype.Docker2ForeignLayer + // MediaTypeOCI1Layer is the uncompressed layer for OCIv1. + // + // Deprecated: replace with [mediatype.OCI1Layer]. + MediaTypeOCI1Layer = mediatype.OCI1Layer + // MediaTypeOCI1LayerGzip is the gzip compressed layer for OCI v1. + // + // Deprecated: replace with [mediatype.OCI1LayerGzip]. + MediaTypeOCI1LayerGzip = mediatype.OCI1LayerGzip + // MediaTypeOCI1LayerZstd is the zstd compressed layer for OCI v1. + // + // Deprecated: replace with [mediatype.OCI1LayerZstd]. + MediaTypeOCI1LayerZstd = mediatype.OCI1LayerZstd + // MediaTypeOCI1ForeignLayer is the foreign layer for OCI v1. + // + // Deprecated: replace with [mediatype.OCI1ForeignLayer]. + MediaTypeOCI1ForeignLayer = mediatype.OCI1ForeignLayer + // MediaTypeOCI1ForeignLayerGzip is the gzip compressed foreign layer for OCI v1. + // + // Deprecated: replace with [mediatype.OCI1ForeignLayerGzip]. + MediaTypeOCI1ForeignLayerGzip = mediatype.OCI1ForeignLayerGzip + // MediaTypeOCI1ForeignLayerZstd is the zstd compressed foreign layer for OCI v1. + // + // Deprecated: replace with [mediatype.OCI1ForeignLayerZstd]. + MediaTypeOCI1ForeignLayerZstd = mediatype.OCI1ForeignLayerZstd + // MediaTypeOCI1Empty is used for blobs containing the empty JSON data `{}`. + // + // Deprecated: replace with [mediatype.OCI1Empty]. + MediaTypeOCI1Empty = mediatype.OCI1Empty + // MediaTypeBuildkitCacheConfig is used by buildkit cache images. + // + // Deprecated: replace with [mediatype.BuildkitCacheConfig]. + MediaTypeBuildkitCacheConfig = mediatype.BuildkitCacheConfig +) + +var ( + // Base cleans the Content-Type header to return only the lower case base media type. + // + // Deprecated: replace with [mediatype.Base]. + MediaTypeBase = mediatype.Base ) diff --git a/vendor/github.com/regclient/regclient/types/mediatype/mediatype.go b/vendor/github.com/regclient/regclient/types/mediatype/mediatype.go new file mode 100644 index 000000000..914ac538f --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/mediatype/mediatype.go @@ -0,0 +1,51 @@ +// Package mediatype defines well known media types. +package mediatype + +import "strings" + +const ( + // Docker1Manifest deprecated media type for docker schema1 manifests. + Docker1Manifest = "application/vnd.docker.distribution.manifest.v1+json" + // Docker1ManifestSigned is a deprecated schema1 manifest with jws signing. + Docker1ManifestSigned = "application/vnd.docker.distribution.manifest.v1+prettyjws" + // Docker2Manifest is the media type when pulling manifests from a v2 registry. + Docker2Manifest = "application/vnd.docker.distribution.manifest.v2+json" + // Docker2ManifestList is the media type when pulling a manifest list from a v2 registry. + Docker2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + // Docker2ImageConfig is for the configuration json object media type. 
+ Docker2ImageConfig = "application/vnd.docker.container.image.v1+json" + // OCI1Artifact EXPERIMENTAL OCI v1 artifact media type. + OCI1Artifact = "application/vnd.oci.artifact.manifest.v1+json" + // OCI1Manifest OCI v1 manifest media type. + OCI1Manifest = "application/vnd.oci.image.manifest.v1+json" + // OCI1ManifestList OCI v1 manifest list media type. + OCI1ManifestList = "application/vnd.oci.image.index.v1+json" + // OCI1ImageConfig OCI v1 configuration json object media type. + OCI1ImageConfig = "application/vnd.oci.image.config.v1+json" + // Docker2LayerGzip is the default compressed layer for docker schema2. + Docker2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip" + // Docker2ForeignLayer is the default compressed layer for foreign layers in docker schema2. + Docker2ForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + // OCI1Layer is the uncompressed layer for OCIv1. + OCI1Layer = "application/vnd.oci.image.layer.v1.tar" + // OCI1LayerGzip is the gzip compressed layer for OCI v1. + OCI1LayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" + // OCI1LayerZstd is the zstd compressed layer for OCI v1. + OCI1LayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" + // OCI1ForeignLayer is the foreign layer for OCI v1. + OCI1ForeignLayer = "application/vnd.oci.image.layer.nondistributable.v1.tar" + // OCI1ForeignLayerGzip is the gzip compressed foreign layer for OCI v1. + OCI1ForeignLayerGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + // OCI1ForeignLayerZstd is the zstd compressed foreign layer for OCI v1. + OCI1ForeignLayerZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" + // OCI1Empty is used for blobs containing the empty JSON data `{}`. + OCI1Empty = "application/vnd.oci.empty.v1+json" + // BuildkitCacheConfig is used by buildkit cache images. + BuildkitCacheConfig = "application/vnd.buildkit.cacheconfig.v0" +) + +// Base cleans the Content-Type header to return only the lower case base media type. +func Base(orig string) string { + base, _, _ := strings.Cut(orig, ";") + return strings.TrimSpace(strings.ToLower(base)) +} diff --git a/vendor/github.com/regclient/regclient/types/oci/v1/artifact.go b/vendor/github.com/regclient/regclient/types/oci/v1/artifact.go index a3647cdb4..8effe1680 100644 --- a/vendor/github.com/regclient/regclient/types/oci/v1/artifact.go +++ b/vendor/github.com/regclient/regclient/types/oci/v1/artifact.go @@ -1,8 +1,6 @@ package v1 -import ( - "github.com/regclient/regclient/types" -) +import "github.com/regclient/regclient/types/descriptor" // ArtifactManifest EXPERIMENTAL defines an OCI Artifact type ArtifactManifest struct { @@ -13,14 +11,10 @@ type ArtifactManifest struct { ArtifactType string `json:"artifactType,omitempty"` // Blobs is a collection of blobs referenced by this manifest. - Blobs []types.Descriptor `json:"blobs,omitempty"` - - // Refers indicates this manifest references another manifest - // TODO: deprecated, delete this from future releases - Refers *types.Descriptor `json:"refers,omitempty"` + Blobs []descriptor.Descriptor `json:"blobs,omitempty"` // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. - Subject *types.Descriptor `json:"subject,omitempty"` + Subject *descriptor.Descriptor `json:"subject,omitempty"` // Annotations contains arbitrary metadata for the artifact manifest. 
Annotations map[string]string `json:"annotations,omitempty"` diff --git a/vendor/github.com/regclient/regclient/types/oci/v1/config.go b/vendor/github.com/regclient/regclient/types/oci/v1/config.go index 8be23dc92..6448f134b 100644 --- a/vendor/github.com/regclient/regclient/types/oci/v1/config.go +++ b/vendor/github.com/regclient/regclient/types/oci/v1/config.go @@ -1,5 +1,8 @@ package v1 +// Docker specific content in this file is included from +// https://github.com/moby/moby/blob/master/api/types/container/config.go + import ( "time" // crypto libraries included for go-digest @@ -7,6 +10,8 @@ import ( _ "crypto/sha512" digest "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/types/platform" ) // ImageConfig defines the execution parameters which should be used as a base when running a container using an image. @@ -38,6 +43,10 @@ type ImageConfig struct { // StopSignal contains the system call signal that will be sent to the container to exit. StopSignal string `json:"StopSignal,omitempty"` + // StopTimeout is the time in seconds to stop the container. + // This is a Docker specific extension to the config, and not part of the OCI spec. + StopTimeout *int `json:",omitempty"` + // ArgsEscaped `[Deprecated]` - This field is present only for legacy // compatibility with Docker and should not be used by new image builders. // It is used by Docker for Windows images to indicate that the `Entrypoint` @@ -46,6 +55,18 @@ type ImageConfig struct { // the value in `Entrypoint` or `Cmd` should be used as-is to avoid double // escaping. ArgsEscaped bool `json:"ArgsEscaped,omitempty"` + + // Healthcheck describes how to check if the container is healthy. + // This is a Docker specific extension to the config, and not part of the OCI spec. + Healthcheck *HealthConfig `json:"Healthcheck,omitempty"` + + // OnBuild lists any ONBUILD steps defined in the Dockerfile. + // This is a Docker specific extension to the config, and not part of the OCI spec. + OnBuild []string `json:"OnBuild,omitempty"` + + // Shell for the shell-form of RUN, CMD, and ENTRYPOINT. + // This is a Docker specific extension to the config, and not part of the OCI spec. + Shell []string `json:"Shell,omitempty"` } // RootFS describes a layer content addresses @@ -84,22 +105,8 @@ type Image struct { // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. Author string `json:"author,omitempty"` - // Architecture is the CPU architecture which the binaries in this image are built to run on. - Architecture string `json:"architecture"` - - // Variant is the variant of the specified CPU architecture which image binaries are intended to run on. - Variant string `json:"variant,omitempty"` - - // OS is the name of the operating system which the image is built to run on. - OS string `json:"os"` - - // OSVersion is an optional field specifying the operating system - // version, for example on Windows `10.0.14393.1066`. - OSVersion string `json:"os.version,omitempty"` - - // OSFeatures is an optional field specifying an array of strings, - // each listing a required OS feature (for example on Windows `win32k`). - OSFeatures []string `json:"os.features,omitempty"` + // Platform describes the platform which the image in the manifest runs on. + platform.Platform // Config defines the execution parameters which should be used as a base when running a container using the image. 
Config ImageConfig `json:"config,omitempty"` @@ -110,3 +117,25 @@ type Image struct { // History describes the history of each layer. History []History `json:"history,omitempty"` } + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +// This is a Docker specific extension to the config, and not part of the OCI spec. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} diff --git a/vendor/github.com/regclient/regclient/types/oci/v1/index.go b/vendor/github.com/regclient/regclient/types/oci/v1/index.go index d91d6a8e9..887a5c874 100644 --- a/vendor/github.com/regclient/regclient/types/oci/v1/index.go +++ b/vendor/github.com/regclient/regclient/types/oci/v1/index.go @@ -1,7 +1,7 @@ package v1 import ( - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/descriptor" "github.com/regclient/regclient/types/oci" ) @@ -18,8 +18,14 @@ type Index struct { // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` MediaType string `json:"mediaType,omitempty"` + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. + ArtifactType string `json:"artifactType,omitempty"` + // Manifests references platform specific manifests. - Manifests []types.Descriptor `json:"manifests"` + Manifests []descriptor.Descriptor `json:"manifests"` + + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *descriptor.Descriptor `json:"subject,omitempty"` // Annotations contains arbitrary metadata for the image index. Annotations map[string]string `json:"annotations,omitempty"` diff --git a/vendor/github.com/regclient/regclient/types/oci/v1/manifest.go b/vendor/github.com/regclient/regclient/types/oci/v1/manifest.go index 53476ff23..d49c39ae1 100644 --- a/vendor/github.com/regclient/regclient/types/oci/v1/manifest.go +++ b/vendor/github.com/regclient/regclient/types/oci/v1/manifest.go @@ -1,7 +1,7 @@ package v1 import ( - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/descriptor" "github.com/regclient/regclient/types/oci" ) @@ -17,20 +17,19 @@ type Manifest struct { // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` MediaType string `json:"mediaType,omitempty"` + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. 
+ ArtifactType string `json:"artifactType,omitempty"` + // Config references a configuration object for a container, by digest. // The referenced configuration object is a JSON blob that the runtime uses to set up the container. - Config types.Descriptor `json:"config"` + Config descriptor.Descriptor `json:"config"` // Layers is an indexed list of layers referenced by the manifest. - Layers []types.Descriptor `json:"layers"` + Layers []descriptor.Descriptor `json:"layers"` + + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *descriptor.Descriptor `json:"subject,omitempty"` // Annotations contains arbitrary metadata for the image manifest. Annotations map[string]string `json:"annotations,omitempty"` - - // Refers indicates this manifest references another manifest - // TODO: deprecated, delete this from future releases - Refers *types.Descriptor `json:"refers,omitempty"` - - // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. - Subject *types.Descriptor `json:"subject,omitempty"` } diff --git a/vendor/github.com/regclient/regclient/types/ping/ping.go b/vendor/github.com/regclient/regclient/types/ping/ping.go new file mode 100644 index 000000000..9417b4a51 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/ping/ping.go @@ -0,0 +1,13 @@ +// Package ping is used for data types with the Ping methods. +package ping + +import ( + "io/fs" + "net/http" +) + +// Result is the response to a ping request. +type Result struct { + Header http.Header // Header is defined for responses from a registry. + Stat fs.FileInfo // Stat is defined for responses from an ocidir. +} diff --git a/vendor/github.com/regclient/regclient/types/platform/compare.go b/vendor/github.com/regclient/regclient/types/platform/compare.go new file mode 100644 index 000000000..530180336 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/compare.go @@ -0,0 +1,213 @@ +package platform + +import ( + "strconv" + "strings" +) + +type compare struct { + host Platform +} + +type CompareOpts func(*compare) + +// NewCompare is used to compare multiple target entries to a host value. +func NewCompare(host Platform, opts ...CompareOpts) *compare { + (&host).normalize() + c := compare{ + host: host, + } + for _, optFn := range opts { + optFn(&c) + } + return &c +} + +// Better returns true when the target is compatible and a better match than the previous platform. +// The previous platform value may be the zero value when no previous match has been found. 
+func (c *compare) Better(target, prev Platform) bool { + if !Compatible(c.host, target) { + return false + } + (&target).normalize() + (&prev).normalize() + if prev.OS != target.OS { + if target.OS == c.host.OS { + return true + } else if prev.OS == c.host.OS { + return false + } + } + if prev.Architecture != target.Architecture { + if target.Architecture == c.host.Architecture { + return true + } else if prev.Architecture == c.host.Architecture { + return false + } + } + if prev.Variant != target.Variant { + if target.Variant == c.host.Variant { + return true + } else if prev.Variant == c.host.Variant { + return false + } + pV := variantVer(prev.Variant) + tV := variantVer(target.Variant) + if tV > pV { + return true + } else if tV < pV { + return false + } + } + if prev.OSVersion != target.OSVersion { + if target.OSVersion == c.host.OSVersion { + return true + } else if prev.OSVersion == c.host.OSVersion { + return false + } + cmp := semverCmp(prev.OSVersion, target.OSVersion) + if cmp != 0 { + return cmp < 0 + } + } + return false +} + +// Compatible indicates if a host can run a specified target platform image. +// This accounts for Docker Desktop for Mac and Windows using a Linux VM. +func (c *compare) Compatible(target Platform) bool { + (&target).normalize() + if c.host.OS == "linux" { + return c.host.OS == target.OS && c.host.Architecture == target.Architecture && + variantCompatible(c.host.Variant, target.Variant) + } else if c.host.OS == "windows" { + if target.OS == "windows" { + return c.host.Architecture == target.Architecture && + variantCompatible(c.host.Variant, target.Variant) && + osVerCompatible(c.host.OSVersion, target.OSVersion) + } else if target.OS == "linux" { + return c.host.Architecture == target.Architecture && + variantCompatible(c.host.Variant, target.Variant) + } + return false + } else if c.host.OS == "darwin" { + return (target.OS == "darwin" || target.OS == "linux") && + c.host.Architecture == target.Architecture && + variantCompatible(c.host.Variant, target.Variant) + } else { + return c.host.OS == target.OS && c.host.Architecture == target.Architecture && + variantCompatible(c.host.Variant, target.Variant) && + c.host.OSVersion == target.OSVersion && + strSliceEq(c.host.OSFeatures, target.OSFeatures) && + strSliceEq(c.host.Features, target.Features) + } +} + +// Match indicates if two platforms are the same. +func (c *compare) Match(target Platform) bool { + (&target).normalize() + if c.host.OS != target.OS { + return false + } + if c.host.OS == "linux" { + return c.host.Architecture == target.Architecture && c.host.Variant == target.Variant + } else if c.host.OS == "windows" { + return c.host.Architecture == target.Architecture && c.host.Variant == target.Variant && + osVerSemver(c.host.OSVersion) == osVerSemver(target.OSVersion) + } else { + return c.host.Architecture == target.Architecture && + c.host.Variant == target.Variant && + c.host.OSVersion == target.OSVersion && + strSliceEq(c.host.OSFeatures, target.OSFeatures) && + strSliceEq(c.host.Features, target.Features) + } +} + +// Compatible indicates if a host can run a specified target platform image. +// This accounts for Docker Desktop for Mac and Windows using a Linux VM. +func Compatible(host, target Platform) bool { + comp := NewCompare(host) + return comp.Compatible(target) +} + +// Match indicates if two platforms are the same. 
+func Match(a, b Platform) bool { + comp := NewCompare(a) + return comp.Match(b) +} + +func osVerCompatible(host, target string) bool { + if host == "" { + return true + } + vHost := osVerSemver(host) + vTarget := osVerSemver(target) + return vHost == vTarget +} + +func osVerSemver(platVer string) string { + verParts := strings.Split(platVer, ".") + if len(verParts) < 4 { + return platVer + } + return strings.Join(verParts[0:3], ".") +} + +// return: -1 if a<b, 0 if a=b, 1 if a>b +func semverCmp(a, b string) int { + aParts := strings.Split(a, ".") + bParts := strings.Split(b, ".") + for i := range aParts { + if len(bParts) < i+1 { + return 1 + } + aInt, aErr := strconv.Atoi(aParts[i]) + bInt, bErr := strconv.Atoi(bParts[i]) + if aErr != nil { + if bErr != nil { + return 0 + } + return -1 + } + if bErr != nil { + return 1 + } + if aInt < bInt { + return -1 + } + if aInt > bInt { + return 1 + } + } + return 0 +} + +func strSliceEq(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +func variantCompatible(host, target string) bool { + vHost := variantVer(host) + vTarget := variantVer(target) + if vHost >= vTarget || (vHost == 1 && target == "") || (host == "" && vTarget == 1) { + return true + } + return false +} + +func variantVer(v string) int { + v = strings.TrimPrefix(v, "v") + ver, err := strconv.Atoi(v) + if err != nil { + return 0 + } + return ver +} diff --git a/vendor/github.com/regclient/regclient/types/platform/cpuinfo.go b/vendor/github.com/regclient/regclient/types/platform/cpuinfo.go index 15fe65a67..75fe335a1 100644 --- a/vendor/github.com/regclient/regclient/types/platform/cpuinfo.go +++ b/vendor/github.com/regclient/regclient/types/platform/cpuinfo.go @@ -1,28 +1,19 @@ -// base on: -/* - Copyright The containerd Authors. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ +// Related implementations: +// +// +// +// +// package platform import ( - "bufio" - "os" "runtime" - "strings" "sync" ) -// Present the ARM instruction set architecture, eg: v7, v8 +// cpuVariantValue is the variant of the local CPU architecture. +// For example on ARM, v7 and v8. And on AMD64, v1 - v4. // Don't use this value directly; call cpuVariant() instead. var cpuVariantValue string @@ -31,81 +22,9 @@ var cpuVariantOnce sync.Once func cpuVariant() string { cpuVariantOnce.Do(func() { switch runtime.GOARCH { - case "arm", "arm64": - cpuVariantValue = getCPUVariant() + case "amd64", "arm", "arm64": + cpuVariantValue = lookupCPUVariant() } }) return cpuVariantValue } - -// For Linux, the kernel has already detected the ABI, ISA and Features. -// So we don't need to access the ARM registers to detect platform information -// by ourselves. We can just parse these information from /proc/cpuinfo -func getCPUInfo(pattern string) (info string) { - if runtime.GOOS != "linux" { - return "" - } - - cpuinfo, err := os.Open("/proc/cpuinfo") - if err != nil { - return "" - } - defer cpuinfo.Close() - - // Start to Parse the Cpuinfo line by line.
For SMP SoC, we parse - // the first core is enough. - scanner := bufio.NewScanner(cpuinfo) - for scanner.Scan() { - newline := scanner.Text() - list := strings.Split(newline, ":") - - if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { - return strings.TrimSpace(list[1]) - } - } - return "" -} - -func getCPUVariant() string { - if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { - // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use - // runtime.GOARCH to determine the variants - switch runtime.GOARCH { - case "arm64": - return "v8" - case "arm": - return "v7" - } - return "" - } - - variant := getCPUInfo("Cpu architecture") - - // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") - // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 - if runtime.GOARCH == "arm" && variant == "7" { - model := getCPUInfo("model name") - if strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { - variant = "6" - } - } - - switch strings.ToLower(variant) { - case "8", "aarch64": - variant = "v8" - case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": - variant = "v7" - case "6", "6tej": - variant = "v6" - case "5", "5t", "5te", "5tej": - variant = "v5" - case "4", "4t": - variant = "v4" - case "3": - variant = "v3" - default: - variant = "" - } - - return variant -} diff --git a/vendor/github.com/regclient/regclient/types/platform/cpuinfo_armx.go b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_armx.go new file mode 100644 index 000000000..60940dfac --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_armx.go @@ -0,0 +1,83 @@ +//go:build arm || arm64 +// +build arm arm64 + +package platform + +import ( + "bufio" + "os" + "runtime" + "strings" +) + +func lookupCPUVariant() string { + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + switch runtime.GOARCH { + case "arm64": + return "v8" + case "arm": + return "v7" + } + return "" + } + + variant := getCPUInfo("Cpu architecture") + + // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") + // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 + if runtime.GOARCH == "arm" && variant == "7" { + model := getCPUInfo("model name") + if strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { + variant = "6" + } + } + + switch strings.ToLower(variant) { + case "8", "aarch64": + variant = "v8" + case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + variant = "v7" + case "6", "6tej": + variant = "v6" + case "5", "5t", "5te", "5tej": + variant = "v5" + case "4", "4t": + variant = "v4" + case "3": + variant = "v3" + default: + variant = "" + } + + return variant +} + +// For Linux, the kernel has already detected the ABI, ISA and Features. +// So we don't need to access the ARM registers to detect platform information +// by ourselves. We can just parse these information from /proc/cpuinfo +func getCPUInfo(pattern string) (info string) { + if runtime.GOOS != "linux" { + return "" + } + + cpuinfo, err := os.Open("/proc/cpuinfo") + if err != nil { + return "" + } + defer cpuinfo.Close() + + // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse + // the first core is enough. 
+ scanner := bufio.NewScanner(cpuinfo) + for scanner.Scan() { + newline := scanner.Text() + list := strings.Split(newline, ":") + + if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { + return strings.TrimSpace(list[1]) + } + } + return "" +} diff --git a/vendor/github.com/regclient/regclient/types/platform/cpuinfo_other.go b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_other.go new file mode 100644 index 000000000..5ac63b2e1 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_other.go @@ -0,0 +1,8 @@ +//go:build !386 && !amd64 && !amd64p32 && !arm && !arm64 +// +build !386,!amd64,!amd64p32,!arm,!arm64 + +package platform + +func lookupCPUVariant() string { + return "" +} diff --git a/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.go b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.go new file mode 100644 index 000000000..b2a940775 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.go @@ -0,0 +1,100 @@ +//go:build 386 || amd64 || amd64p32 +// +build 386 amd64 amd64p32 + +package platform + +const ( + ecx1SSE3 = 0 + ecx1SSSE3 = 9 + ecx1FMA = 12 + ecx1CX16 = 13 + ecx1SSE4_1 = 19 + ecx1SSE4_2 = 20 + ecx1MOVBE = 22 + ecx1POPCNT = 23 + ecx1XSAVE = 26 + ecx1OSXSAVE = 27 + ecx1AVX = 28 + ecx1F16C = 29 + + ebx7BMI1 = 3 + ebx7AVX2 = 5 + ebx7BMI2 = 8 + ebx7AVX512F = 16 + ebx7AVX512DQ = 17 + ebx7AVX512CD = 28 + ebx7AVX512BW = 30 + ebx7AVX512VL = 31 + + ecxxLAHF = 0 + ecxxLZCNT = 5 + + eaxOSXMM = 1 + eaxOSYMM = 2 + eaxOSOpMask = 5 + eaxOSZMMHi16 = 6 + eaxOSZMMHi256 = 7 +) + +var ( + // GOAMD64=v1 (default): The baseline. Exclusively generates instructions that all 64-bit x86 processors can execute. + // GOAMD64=v2: all v1 instructions, plus CX16, LAHF-SAHF, POPCNT, SSE3, SSE4.1, SSE4.2, SSSE3. + // GOAMD64=v3: all v2 instructions, plus AVX, AVX2, BMI1, BMI2, F16C, FMA, LZCNT, MOVBE, OSXSAVE. + // GOAMD64=v4: all v3 instructions, plus AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL. + ecx1FeaturesV2 = bitSet(ecx1CX16) | bitSet(ecx1POPCNT) | bitSet(ecx1SSE3) | bitSet(ecx1SSE4_1) | bitSet(ecx1SSE4_2) | bitSet(ecx1SSSE3) + ecx1FeaturesV3 = ecx1FeaturesV2 | bitSet(ecx1AVX) | bitSet(ecx1F16C) | bitSet(ecx1FMA) | bitSet(ecx1MOVBE) | bitSet(ecx1OSXSAVE) + ebx7FeaturesV3 = bitSet(ebx7AVX2) | bitSet(ebx7BMI1) | bitSet(ebx7BMI2) + ebx7FeaturesV4 = ebx7FeaturesV3 | bitSet(ebx7AVX512F) | bitSet(ebx7AVX512BW) | bitSet(ebx7AVX512CD) | bitSet(ebx7AVX512DQ) | bitSet(ebx7AVX512VL) + ecxxFeaturesV2 = bitSet(ecxxLAHF) + ecxxFeaturesV3 = ecxxFeaturesV2 | bitSet(ecxxLZCNT) + eaxOSFeaturesV3 = bitSet(eaxOSXMM) | bitSet(eaxOSYMM) + eaxOSFeaturesV4 = eaxOSFeaturesV3 | bitSet(eaxOSOpMask) | bitSet(eaxOSZMMHi16) | bitSet(eaxOSZMMHi256) +) + +// cpuid is implemented in cpuinfo_x86.s. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_x86.s. 
+func xgetbv() (eax, edx uint32) + +func lookupCPUVariant() string { + variant := "v1" + maxID, _, _, _ := cpuid(0, 0) + if maxID < 7 { + return variant + } + _, _, ecx1, _ := cpuid(1, 0) + _, ebx7, _, _ := cpuid(7, 0) + maxX, _, _, _ := cpuid(0x80000000, 0) + _, _, ecxx, _ := cpuid(0x80000001, 0) + + if maxX < 0x80000001 || !bitIsSet(ecx1FeaturesV2, ecx1) || !bitIsSet(ecxxFeaturesV2, ecxx) { + return variant + } + variant = "v2" + + if !bitIsSet(ecx1FeaturesV3, ecx1) || !bitIsSet(ebx7FeaturesV3, ebx7) || !bitIsSet(ecxxFeaturesV3, ecxx) { + return variant + } + // For XGETBV, OSXSAVE bit is required and verified by ecx1FeaturesV3. + eaxOS, _ := xgetbv() + if !bitIsSet(eaxOSFeaturesV3, eaxOS) { + return variant + } + variant = "v3" + + // Darwin support for AVX-512 appears to have issues. + if isDarwin || !bitIsSet(ebx7FeaturesV4, ebx7) || !bitIsSet(eaxOSFeaturesV4, eaxOS) { + return variant + } + variant = "v4" + + return variant +} + +func bitSet(bitpos uint) uint32 { + return 1 << bitpos +} +// bitIsSet reports whether every bit in the bits mask is set in value. +func bitIsSet(bits, value uint32) bool { + return (value & bits) == bits +} diff --git a/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.s b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.s new file mode 100644 index 000000000..7d7ba33ef --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.s @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gc + +#include "textflag.h" + +// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuid(SB), NOSPLIT, $0-24 + MOVL eaxArg+0(FP), AX + MOVL ecxArg+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv() (eax, edx uint32) +TEXT ·xgetbv(SB),NOSPLIT,$0-8 + MOVL $0, CX + XGETBV + MOVL AX, eax+0(FP) + MOVL DX, edx+4(FP) + RET diff --git a/vendor/github.com/regclient/regclient/types/platform/os_darwin.go b/vendor/github.com/regclient/regclient/types/platform/os_darwin.go new file mode 100644 index 000000000..161f89eda --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/os_darwin.go @@ -0,0 +1,8 @@ +//go:build darwin +// +build darwin + +package platform + +const isDarwin = true + +var _ = isDarwin diff --git a/vendor/github.com/regclient/regclient/types/platform/os_other.go b/vendor/github.com/regclient/regclient/types/platform/os_other.go new file mode 100644 index 000000000..01b004e6a --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/os_other.go @@ -0,0 +1,8 @@ +//go:build !darwin +// +build !darwin + +package platform + +const isDarwin = false + +var _ = isDarwin diff --git a/vendor/github.com/regclient/regclient/types/platform/platform.go b/vendor/github.com/regclient/regclient/types/platform/platform.go index a08366020..beb002461 100644 --- a/vendor/github.com/regclient/regclient/types/platform/platform.go +++ b/vendor/github.com/regclient/regclient/types/platform/platform.go @@ -20,13 +20,14 @@ import ( "fmt" "path" "regexp" - "runtime" "strings" + + "github.com/regclient/regclient/internal/strparse" + "github.com/regclient/regclient/types/errs" ) var ( partRE = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) - verRE = regexp.MustCompile(`^[A-Za-z0-9\._-]+$`) ) // Platform specifies a platform where a particular image manifest is applicable.
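Aside for reviewers, not part of the patch: the vendored platform package above now exposes Parse with an optional comma-separated argument list ("osver"/"osversion" set OSVersion), package-level Compatible and Match built on the new compare type, and a Local that detects the CPU variant (arm/arm64 via /proc/cpuinfo, amd64 v1-v4 via CPUID). A minimal sketch of how the updated API is expected to be called, assuming the regclient version vendored by this patch; the target platform string and OS version are arbitrary example values, not taken from the patch:

package main

import (
	"fmt"

	"github.com/regclient/regclient/types/platform"
)

func main() {
	// Parse accepts an optional comma-separated argument list after the
	// platform string; "osver" sets OSVersion (hypothetical example value).
	target, err := platform.Parse("windows/amd64,osver=10.0.17763.5122")
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	// Local fills in OS, Architecture, and the detected CPU variant.
	host := platform.Local()
	fmt.Printf("host=%s target=%s\n", host, target)
	// Compatible reports whether the host can run the target image
	// (accounting for Docker Desktop's Linux VM); Match reports whether
	// the two platforms are the same.
	fmt.Println("compatible:", platform.Compatible(host, target))
	fmt.Println("match:", platform.Match(host, target))
}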
@@ -55,75 +56,20 @@ func (p Platform) String() string { (&p).normalize() if p.OS == "" { return "unknown" - } else if p.OS == "windows" { - return path.Join(p.OS, p.Architecture, p.OSVersion) } else { return path.Join(p.OS, p.Architecture, p.Variant) } } -// Compatible indicates if a host can run a specified target platform image. -// This accounts for Docker Desktop for Mac and Windows using a Linux VM. -func Compatible(host, target Platform) bool { - (&host).normalize() - (&target).normalize() - if host.OS == "linux" { - return host.OS == target.OS && host.Architecture == target.Architecture && host.Variant == target.Variant - } else if host.OS == "windows" { - if target.OS == "windows" { - return host.Architecture == target.Architecture && host.Variant == target.Variant && - prefix(host.OSVersion) == prefix(target.OSVersion) - } else if target.OS == "linux" { - return host.Architecture == target.Architecture && host.Variant == target.Variant - } - return false - } else if host.OS == "darwin" { - if target.OS == "darwin" || target.OS == "linux" { - return host.Architecture == target.Architecture && host.Variant == target.Variant - } - return false - } else { - return host.Architecture == target.Architecture && - host.OSVersion == target.OSVersion && - strSliceEq(host.OSFeatures, target.OSFeatures) && - host.Variant == target.Variant && - strSliceEq(host.Features, target.Features) - } -} - -// Match indicates if two platforms are the same -func Match(a, b Platform) bool { - (&a).normalize() - (&b).normalize() - if a.OS != b.OS { - return false - } - if a.OS == "linux" { - return a.Architecture == b.Architecture && a.Variant == b.Variant - } else if a.OS == "windows" { - return a.Architecture == b.Architecture && - prefix(a.OSVersion) == prefix(b.OSVersion) - } else { - return a.Architecture == b.Architecture && - a.OSVersion == b.OSVersion && - strSliceEq(a.OSFeatures, b.OSFeatures) && - a.Variant == b.Variant && - strSliceEq(a.Features, b.Features) - } -} - // Parse converts a platform string into a struct func Parse(platStr string) (Platform, error) { + // args are a regclient specific way to extend the platform string + platArgs := strings.SplitN(platStr, ",", 2) // split on slash, validate each component - platSplit := strings.Split(platStr, "/") + platSplit := strings.Split(platArgs[0], "/") for i, part := range platSplit { - if i == 2 && platSplit[0] == "windows" { - // TODO: (bmitch) this may not be officially allowed, but I can't find a decent reference for what it should be - if !verRE.MatchString(part) { - return Platform{}, fmt.Errorf("invalid platform component %s in %s", part, platStr) - } - } else if !partRE.MatchString(part) { - return Platform{}, fmt.Errorf("invalid platform component %s in %s", part, platStr) + if !partRE.MatchString(part) { + return Platform{}, fmt.Errorf("invalid platform component %s in %s%.0w", part, platStr, errs.ErrParsingFailed) } platSplit[i] = strings.ToLower(part) } @@ -135,38 +81,55 @@ func Parse(platStr string) (Platform, error) { plat.Architecture = platSplit[1] } if len(platSplit) >= 3 { - if plat.OS == "windows" { - plat.OSVersion = platSplit[2] - } else { - plat.Variant = platSplit[2] + plat.Variant = platSplit[2] + } + if len(platArgs) > 1 { + kvMap, err := strparse.SplitCSKV(platArgs[1]) + if err != nil { + return Platform{}, fmt.Errorf("failed to split platform args in %s: %w", platStr, err) + } + for k, v := range kvMap { + k := strings.TrimSpace(k) + v := strings.TrimSpace(v) + switch strings.ToLower(k) { + case "osver", "osversion": 
+ plat.OSVersion = v + default: + return Platform{}, fmt.Errorf("unsupported platform arg type, %s in %s%.0w", k, platStr, errs.ErrParsingFailed) + } } } - // extrapolate missing fields and normalize + // gather local platform details platLocal := Local() - if plat.OS == "" || plat.OS == "local" { - // assume local OS - plat.OS = platLocal.OS - } + // normalize and extrapolate missing fields switch plat.OS { - case "macos": - plat.OS = "darwin" + case "local", "": + plat.OS = platLocal.OS } - if len(platSplit) < 2 && plat.OS == runtime.GOOS { + plat.normalize() + if len(platSplit) < 2 && Compatible(Platform{OS: platLocal.OS}, Platform{OS: plat.OS}) { + // automatically expand local architecture with recognized OS switch plat.OS { case "linux", "darwin": - // automatically expand local architecture with recognized OS plat.Architecture = platLocal.Architecture + plat.Variant = platLocal.Variant case "windows": plat.Architecture = platLocal.Architecture - plat.OSVersion = platLocal.OSVersion + plat.Variant = platLocal.Variant } } - plat.normalize() + if plat.OS == "windows" && plat.OS == platLocal.OS && plat.Architecture == platLocal.Architecture && plat.Variant == platLocal.Variant { + plat.OSVersion = platLocal.OSVersion + } return *plat, nil } func (p *Platform) normalize() { + switch p.OS { + case "macos": + p.OS = "darwin" + } switch p.Architecture { case "i386": p.Architecture = "386" @@ -197,23 +160,3 @@ func (p *Platform) normalize() { } } } - -func prefix(platVer string) string { - verParts := strings.Split(platVer, ".") - if len(verParts) < 4 { - return platVer - } - return strings.Join(verParts[0:3], ".") -} - -func strSliceEq(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false - } - } - return true -} diff --git a/vendor/github.com/regclient/regclient/types/platform/platform_other.go b/vendor/github.com/regclient/regclient/types/platform/platform_other.go index 7285bf11c..be99816e3 100644 --- a/vendor/github.com/regclient/regclient/types/platform/platform_other.go +++ b/vendor/github.com/regclient/regclient/types/platform/platform_other.go @@ -7,9 +7,11 @@ import "runtime" // Local retrieves the local platform details func Local() Platform { - return Platform{ + plat := Platform{ OS: runtime.GOOS, Architecture: runtime.GOARCH, Variant: cpuVariant(), } + plat.normalize() + return plat } diff --git a/vendor/github.com/regclient/regclient/types/platform/platform_windows.go b/vendor/github.com/regclient/regclient/types/platform/platform_windows.go index afa9db9a3..d0dda6a47 100644 --- a/vendor/github.com/regclient/regclient/types/platform/platform_windows.go +++ b/vendor/github.com/regclient/regclient/types/platform/platform_windows.go @@ -13,10 +13,12 @@ import ( // Local retrieves the local platform details func Local() Platform { major, minor, build := windows.RtlGetNtVersionNumbers() - return Platform{ + plat := Platform{ OS: runtime.GOOS, Architecture: runtime.GOARCH, Variant: cpuVariant(), OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build), } + plat.normalize() + return plat } diff --git a/vendor/github.com/regclient/regclient/types/ref/ref.go b/vendor/github.com/regclient/regclient/types/ref/ref.go index 4b464a6df..c01b02c46 100644 --- a/vendor/github.com/regclient/regclient/types/ref/ref.go +++ b/vendor/github.com/regclient/regclient/types/ref/ref.go @@ -1,6 +1,6 @@ -// Package ref is used to define references -// References default to remote registry references (registry:port/repo:tag) -// Schemes can 
be included in front of the reference for different reference types +// Package ref is used to define references. +// References default to remote registry references (registry:port/repo:tag). +// Schemes can be included in front of the reference for different reference types. package ref import ( @@ -8,63 +8,61 @@ import ( "path" "regexp" "strings" + + "github.com/regclient/regclient/types/errs" ) const ( dockerLibrary = "library" - // DockerRegistry is the name resolved in docker images on Hub + // dockerRegistry is the name resolved in docker images on Hub. dockerRegistry = "docker.io" - // DockerRegistryLegacy is the name resolved in docker images on Hub + // dockerRegistryLegacy is the name resolved in docker images on Hub. dockerRegistryLegacy = "index.docker.io" - // DockerRegistryDNS is the host to connect to for Hub + // dockerRegistryDNS is the host to connect to for Hub. dockerRegistryDNS = "registry-1.docker.io" ) var ( - hostPartS = `(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?)` - // host with port allows a short name in addition to hostDomainS - hostPortS = `(?:` + hostPartS + `(?:` + regexp.QuoteMeta(`.`) + hostPartS + `)*` + regexp.QuoteMeta(`.`) + `?` + regexp.QuoteMeta(`:`) + `[0-9]+)` - // hostname may be ip, fqdn (example.com), or trailing dot (example.) + hostPartS = `(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?)` + hostPortS = `(?:` + hostPartS + `(?:` + regexp.QuoteMeta(`.`) + hostPartS + `)*` + regexp.QuoteMeta(`.`) + `?` + regexp.QuoteMeta(`:`) + `[0-9]+)` hostDomainS = `(?:` + hostPartS + `(?:(?:` + regexp.QuoteMeta(`.`) + hostPartS + `)+` + regexp.QuoteMeta(`.`) + `?|` + regexp.QuoteMeta(`.`) + `))` hostUpperS = `(?:[a-zA-Z0-9]*[A-Z][a-zA-Z0-9-]*[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[A-Z][a-zA-Z0-9]*)` - registryS = `(?:` + hostDomainS + `|` + hostPortS + `|` + hostUpperS + `|localhost(?:` + regexp.QuoteMeta(`:`) + `[0-9]+))` - repoPartS = `[a-z0-9]+(?:(?:[_.]|__|[-]*)[a-z0-9]+)*` + registryS = `(?:` + hostDomainS + `|` + hostPortS + `|` + hostUpperS + `|localhost(?:` + regexp.QuoteMeta(`:`) + `[0-9]+)?)` + repoPartS = `[a-z0-9]+(?:(?:\.|_|__|-+)[a-z0-9]+)*` pathS = `[/a-zA-Z0-9_\-. ]+` - tagS = `[\w][\w.-]{0,127}` + tagS = `[a-zA-Z0-9_][a-zA-Z0-9._-]{0,127}` digestS = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}` + schemeRE = regexp.MustCompile(`^([a-z]+)://(.+)$`) + registryRE = regexp.MustCompile(`^(` + registryS + `)$`) refRE = regexp.MustCompile(`^(?:(` + registryS + `)` + regexp.QuoteMeta(`/`) + `)?` + `(` + repoPartS + `(?:` + regexp.QuoteMeta(`/`) + repoPartS + `)*)` + `(?:` + regexp.QuoteMeta(`:`) + `(` + tagS + `))?` + `(?:` + regexp.QuoteMeta(`@`) + `(` + digestS + `))?$`) - schemeRE = regexp.MustCompile(`^([a-z]+)://(.+)$`) - pathRE = regexp.MustCompile(`^(` + pathS + `)` + + ocidirRE = regexp.MustCompile(`^(` + pathS + `)` + `(?:` + regexp.QuoteMeta(`:`) + `(` + tagS + `))?` + `(?:` + regexp.QuoteMeta(`@`) + `(` + digestS + `))?$`) ) -// Ref reference to a registry/repository -// If the tag or digest is available, it's also included in the reference. -// Reference itself is the unparsed string. -// While this is currently a struct, that may change in the future and access -// to contents should not be assumed/used. +// Ref is a reference to a registry/repository. +// Direct access to the contents of this struct should not be assumed. 
type Ref struct { - Scheme string - Reference string // unparsed string - Registry string // server, host:port - Repository string // path on server - Tag string - Digest string - Path string + Scheme string // Scheme is the type of reference, "reg" or "ocidir". + Reference string // Reference is the unparsed string or common name. + Registry string // Registry is the server for the "reg" scheme. + Repository string // Repository is the path on the registry for the "reg" scheme. + Tag string // Tag is a mutable tag for a reference. + Digest string // Digest is an immutable hash for a reference. + Path string // Path is the directory of the OCI Layout for "ocidir". } -// New returns a reference based on the scheme, defaulting to a +// New returns a reference based on the scheme (defaulting to "reg"). func New(parse string) (Ref, error) { scheme := "" - path := parse + tail := parse matchScheme := schemeRE.FindStringSubmatch(parse) if len(matchScheme) == 3 { scheme = matchScheme[1] - path = matchScheme[2] + tail = matchScheme[2] } ret := Ref{ Scheme: scheme, @@ -73,12 +71,12 @@ func New(parse string) (Ref, error) { switch scheme { case "": ret.Scheme = "reg" - matchRef := refRE.FindStringSubmatch(path) + matchRef := refRE.FindStringSubmatch(tail) if matchRef == nil || len(matchRef) < 5 { - if refRE.FindStringSubmatch(strings.ToLower(path)) != nil { - return Ref{}, fmt.Errorf("invalid reference \"%s\", repo must be lowercase", path) + if refRE.FindStringSubmatch(strings.ToLower(tail)) != nil { + return Ref{}, fmt.Errorf("%w \"%s\", repo must be lowercase", errs.ErrInvalidReference, tail) } - return Ref{}, fmt.Errorf("invalid reference \"%s\"", path) + return Ref{}, fmt.Errorf("%w \"%s\"", errs.ErrInvalidReference, tail) } ret.Registry = matchRef[1] ret.Repository = matchRef[2] @@ -102,13 +100,13 @@ func New(parse string) (Ref, error) { ret.Tag = "latest" } if ret.Repository == "" { - return Ref{}, fmt.Errorf("invalid reference \"%s\"", path) + return Ref{}, fmt.Errorf("%w \"%s\"", errs.ErrInvalidReference, tail) } case "ocidir", "ocifile": - matchPath := pathRE.FindStringSubmatch(path) + matchPath := ocidirRE.FindStringSubmatch(tail) if matchPath == nil || len(matchPath) < 2 || matchPath[1] == "" { - return Ref{}, fmt.Errorf("invalid path for scheme \"%s\": %s", scheme, path) + return Ref{}, fmt.Errorf("%w, invalid path for scheme \"%s\": %s", errs.ErrInvalidReference, scheme, tail) } ret.Path = matchPath[1] if len(matchPath) > 2 && matchPath[2] != "" { @@ -119,12 +117,51 @@ func New(parse string) (Ref, error) { } default: - return Ref{}, fmt.Errorf("unhandled reference scheme \"%s\" in \"%s\"", scheme, parse) + return Ref{}, fmt.Errorf("%w, unknown scheme \"%s\" in \"%s\"", errs.ErrInvalidReference, scheme, parse) + } + return ret, nil +} + +// NewHost returns a Reg for a registry hostname or equivalent. +// The ocidir schema equivalent is the path. 
+func NewHost(parse string) (Ref, error) { + scheme := "" + tail := parse + matchScheme := schemeRE.FindStringSubmatch(parse) + if len(matchScheme) == 3 { + scheme = matchScheme[1] + tail = matchScheme[2] + } + ret := Ref{ + Scheme: scheme, + } + + switch scheme { + case "": + ret.Scheme = "reg" + matchReg := registryRE.FindStringSubmatch(tail) + if matchReg == nil || len(matchReg) < 2 { + return Ref{}, fmt.Errorf("%w \"%s\"", errs.ErrParsingFailed, tail) + } + ret.Registry = matchReg[1] + if ret.Registry == "" { + return Ref{}, fmt.Errorf("%w \"%s\"", errs.ErrParsingFailed, tail) + } + + case "ocidir", "ocifile": + matchPath := ocidirRE.FindStringSubmatch(tail) + if matchPath == nil || len(matchPath) < 2 || matchPath[1] == "" { + return Ref{}, fmt.Errorf("%w, invalid path for scheme \"%s\": %s", errs.ErrParsingFailed, scheme, tail) + } + ret.Path = matchPath[1] + + default: + return Ref{}, fmt.Errorf("%w, unknown scheme \"%s\" in \"%s\"", errs.ErrParsingFailed, scheme, parse) } return ret, nil } -// CommonName outputs a parsable name from a reference +// CommonName outputs a parsable name from a reference. func (r Ref) CommonName() string { cn := "" switch r.Scheme { @@ -154,7 +191,34 @@ func (r Ref) CommonName() string { return cn } -// IsZero returns true if ref is unset +// IsSet returns true if needed values are defined for a specific reference. +func (r Ref) IsSet() bool { + if !r.IsSetRepo() { + return false + } + // Registry requires a tag or digest, OCI Layout doesn't require these. + if r.Scheme == "reg" && r.Tag == "" && r.Digest == "" { + return false + } + return true +} + +// IsSetRepo returns true when the ref includes values for a specific repository. +func (r Ref) IsSetRepo() bool { + switch r.Scheme { + case "reg": + if r.Registry != "" && r.Repository != "" { + return true + } + case "ocidir": + if r.Path != "" { + return true + } + } + return false +} + +// IsZero returns true if ref is unset. func (r Ref) IsZero() bool { if r.Scheme == "" && r.Registry == "" && r.Repository == "" && r.Path == "" && r.Tag == "" && r.Digest == "" { return true @@ -162,7 +226,25 @@ func (r Ref) IsZero() bool { return false } -// ToReg converts a reference to a registry like syntax +// SetDigest returns a ref with the requested digest set. +// The tag will be unset and the reference value will be reset. +func (r Ref) SetDigest(digest string) Ref { + r.Digest = digest + r.Tag = "" + r.Reference = r.CommonName() + return r +} + +// SetTag returns a ref with the requested tag set. +// The digest will be unset and the reference value will be reset. +func (r Ref) SetTag(tag string) Ref { + r.Tag = tag + r.Digest = "" + r.Reference = r.CommonName() + return r +} + +// ToReg converts a reference to a registry like syntax. func (r Ref) ToReg() Ref { switch r.Scheme { case "ocidir": @@ -178,7 +260,7 @@ func (r Ref) ToReg() Ref { return r } -// EqualRegistry compares the registry between two references +// EqualRegistry compares the registry between two references. func EqualRegistry(a, b Ref) bool { if a.Scheme != b.Scheme { return false @@ -196,7 +278,7 @@ func EqualRegistry(a, b Ref) bool { } } -// EqualRepository compares the repository between two references +// EqualRepository compares the repository between two references. 
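Since `Ref` is passed by value, the new `SetTag` and `SetDigest` methods above return an updated copy rather than mutating the receiver. A short sketch of how they and the `IsSet` predicates compose; the digest literal is the sha256 of the empty string, used only as a placeholder:

```go
package main

import (
	"fmt"

	"github.com/regclient/regclient/types/ref"
)

func main() {
	r, _ := ref.New("registry.example.com/team/app:v1")

	// SetDigest clears the tag and recomputes Reference via CommonName.
	rd := r.SetDigest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	fmt.Println(rd.Tag == "", rd.CommonName())

	// SetTag is the mirror image: it clears the digest.
	rt := rd.SetTag("v2")
	fmt.Println(rt.Digest == "", rt.CommonName())

	// For the "reg" scheme IsSet requires a tag or digest, while
	// IsSetRepo only requires registry and repository.
	fmt.Println(rt.IsSet(), rt.IsSetRepo()) // true true
}
```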
func EqualRepository(a, b Ref) bool { if a.Scheme != b.Scheme { return false diff --git a/vendor/github.com/regclient/regclient/types/referrer/referrer.go b/vendor/github.com/regclient/regclient/types/referrer/referrer.go index 99937565a..2cbb727f1 100644 --- a/vendor/github.com/regclient/regclient/types/referrer/referrer.go +++ b/vendor/github.com/regclient/regclient/types/referrer/referrer.go @@ -8,7 +8,9 @@ import ( "text/tabwriter" "github.com/opencontainers/go-digest" - "github.com/regclient/regclient/types" + + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" "github.com/regclient/regclient/types/manifest" v1 "github.com/regclient/regclient/types/oci/v1" "github.com/regclient/regclient/types/ref" @@ -16,11 +18,11 @@ import ( // ReferrerList contains the response to a request for referrers to a subject type ReferrerList struct { - Subject ref.Ref `json:"subject"` // subject queried - Descriptors []types.Descriptor `json:"descriptors"` // descriptors found in Index - Annotations map[string]string `json:"annotations,omitempty"` // annotations extracted from Index - Manifest manifest.Manifest `json:"-"` // returned OCI Index - Tags []string `json:"-"` // tags matched when fetching referrers + Subject ref.Ref `json:"subject"` // subject queried + Descriptors []descriptor.Descriptor `json:"descriptors"` // descriptors found in Index + Annotations map[string]string `json:"annotations,omitempty"` // annotations extracted from Index + Manifest manifest.Manifest `json:"-"` // returned OCI Index + Tags []string `json:"-"` // tags matched when fetching referrers } // Add appends an entry to rl.Manifest, used to modify the client managed Index @@ -43,13 +45,21 @@ func (rl *ReferrerList) Add(m manifest.Manifest) error { mDesc.ArtifactType = mOrig.ArtifactType case v1.Manifest: mDesc.Annotations = mOrig.Annotations - mDesc.ArtifactType = mOrig.Config.MediaType + if mOrig.ArtifactType != "" { + mDesc.ArtifactType = mOrig.ArtifactType + } else { + mDesc.ArtifactType = mOrig.Config.MediaType + } + case v1.Index: + mDesc.Annotations = mOrig.Annotations + mDesc.ArtifactType = mOrig.ArtifactType default: // other types are not supported - return fmt.Errorf("invalid manifest for referrer \"%t\": %w", m.GetOrig(), types.ErrUnsupportedMediaType) + return fmt.Errorf("invalid manifest for referrer \"%t\": %w", m.GetOrig(), errs.ErrUnsupportedMediaType) } // append descriptor to index rlM.Manifests = append(rlM.Manifests, mDesc) + rl.Descriptors = rlM.Manifests err := rl.Manifest.SetOrig(rlM) if err != nil { return err @@ -77,8 +87,9 @@ func (rl *ReferrerList) Delete(m manifest.Manifest) error { } } if !found { - return fmt.Errorf("subject not found in referrer list%.0w", types.ErrNotFound) + return fmt.Errorf("subject not found in referrer list%.0w", errs.ErrNotFound) } + rl.Descriptors = rlM.Manifests err := rl.Manifest.SetOrig(rlM) if err != nil { return err @@ -129,19 +140,17 @@ func (rl ReferrerList) MarshalPretty() ([]byte, error) { fmt.Fprintf(tw, " %s:\t%s\n", name, val) } } - tw.Flush() - return buf.Bytes(), nil + err := tw.Flush() + return buf.Bytes(), err } // FallbackTag returns the ref that should be used when the registry does not support the referrers API func FallbackTag(r ref.Ref) (ref.Ref, error) { - rr := r dig, err := digest.Parse(r.Digest) if err != nil { - return rr, fmt.Errorf("failed to parse digest for referrers: %w", err) + return r, fmt.Errorf("failed to parse digest for referrers: %w", err) } - rr.Digest = "" - rr.Tag = 
fmt.Sprintf("%s-%s", dig.Algorithm(), stringMax(dig.Hex(), 64)) + rr := r.SetTag(fmt.Sprintf("%s-%s", dig.Algorithm(), stringMax(dig.Hex(), 64))) return rr, nil } func stringMax(s string, max int) string { diff --git a/vendor/github.com/regclient/regclient/types/repo/repolist.go b/vendor/github.com/regclient/regclient/types/repo/repolist.go index 60943935e..a27aa259d 100644 --- a/vendor/github.com/regclient/regclient/types/repo/repolist.go +++ b/vendor/github.com/regclient/regclient/types/repo/repolist.go @@ -9,9 +9,10 @@ import ( "sort" "strings" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/errs" ) +// RepoList is the response for a repository listing. type RepoList struct { repoCommon RepoRegistryList @@ -34,6 +35,7 @@ type repoConfig struct { type Opts func(*repoConfig) +// New is used to create a repository listing. func New(opts ...Opts) (*RepoList, error) { conf := repoConfig{ mt: "application/json", @@ -57,7 +59,7 @@ func New(opts ...Opts) (*RepoList, error) { return nil, err } default: - return nil, fmt.Errorf("%w: media type: %s, hostname: %s", types.ErrUnsupportedMediaType, conf.mt, conf.host) + return nil, fmt.Errorf("%w: media type: %s, hostname: %s", errs.ErrUnsupportedMediaType, conf.mt, conf.host) } rl.repoCommon = rc @@ -102,7 +104,7 @@ func (r repoCommon) MarshalJSON() ([]byte, error) { if r.orig != nil { return json.Marshal((r.orig)) } - return []byte{}, fmt.Errorf("JSON marshalling failed: %w", types.ErrNotFound) + return []byte{}, fmt.Errorf("JSON marshalling failed: %w", errs.ErrNotFound) } func (r repoCommon) RawBody() ([]byte, error) { diff --git a/vendor/github.com/regclient/regclient/types/tag/taglist.go b/vendor/github.com/regclient/regclient/types/tag/taglist.go index 299153f44..02661e242 100644 --- a/vendor/github.com/regclient/regclient/types/tag/taglist.go +++ b/vendor/github.com/regclient/regclient/types/tag/taglist.go @@ -10,17 +10,20 @@ import ( "sort" "strings" - "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" + ociv1 "github.com/regclient/regclient/types/oci/v1" "github.com/regclient/regclient/types/ref" ) -// List contains a tag list -// Currently this is a struct but the underlying type could be changed to an interface in the future -// Using methods is recommended over directly accessing fields +// List contains a tag list. +// Currently this is a struct but the underlying type could be changed to an interface in the future. +// Using methods is recommended over directly accessing fields. type List struct { tagCommon DockerList GCRList + LayoutList } type tagCommon struct { @@ -32,32 +35,38 @@ type tagCommon struct { url *url.URL } -// DockerList is returned from registry/2.0 API's +// DockerList is returned from registry/2.0 API's. type DockerList struct { Name string `json:"name"` Tags []string `json:"tags"` } -// GCRList fields are from gcr.io +// GCRList fields are from gcr.io. type GCRList struct { Children []string `json:"child,omitempty"` Manifests map[string]GCRManifestInfo `json:"manifest,omitempty"` } +// LayoutList includes the OCI Index from an OCI Layout. +type LayoutList struct { + Index ociv1.Index +} + type tagConfig struct { ref ref.Ref mt string raw []byte header http.Header + index ociv1.Index tags []string url *url.URL } -// Opts defines options for creating a new tag +// Opts defines options for creating a new tag. 
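Returning to the `FallbackTag` change in the referrer hunk above: on registries without the referrers API, the subject digest is mapped to a tag of the form `<algorithm>-<hex>`, with the hex part capped at 64 characters. A self-contained sketch of that scheme, assuming only the go-digest dependency already vendored here; the `fallbackTag` helper name is ours, not the library's:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// fallbackTag mirrors the tag derivation in referrer.FallbackTag above:
// "<algorithm>-<hex>", with the hex portion limited to 64 characters.
func fallbackTag(dig string) (string, error) {
	d, err := digest.Parse(dig)
	if err != nil {
		return "", fmt.Errorf("failed to parse digest: %w", err)
	}
	hex := d.Hex()
	if len(hex) > 64 {
		hex = hex[:64]
	}
	return fmt.Sprintf("%s-%s", d.Algorithm(), hex), nil
}

func main() {
	// sha256 of the empty string, used as a placeholder subject digest.
	t, err := fallbackTag("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	if err != nil {
		panic(err)
	}
	fmt.Println(t) // sha256-e3b0c4...
}
```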
type Opts func(*tagConfig) -// New creates a tag list from options -// Tags may be provided directly, or they will be parsed from the raw input based on the media type +// New creates a tag list from options. +// Tags may be provided directly, or they will be parsed from the raw input based on the media type. func New(opts ...Opts) (*List, error) { conf := tagConfig{} for _, opt := range opts { @@ -77,18 +86,21 @@ func New(opts ...Opts) (*List, error) { if len(conf.tags) > 0 { tl.Tags = conf.tags } + if conf.index.Manifests != nil { + tl.LayoutList.Index = conf.index + } if len(conf.raw) > 0 { - mt := strings.Split(conf.mt, ";")[0] // "application/json; charset=utf-8" -> "application/json" + mt := mediatype.Base(conf.mt) switch mt { case "application/json", "text/plain": err := json.Unmarshal(conf.raw, &tl) if err != nil { return nil, err } - case types.MediaTypeOCI1ManifestList: + case mediatype.OCI1ManifestList: // noop default: - return nil, fmt.Errorf("%w: media type: %s, reference: %s", types.ErrUnsupportedMediaType, conf.mt, conf.ref.CommonName()) + return nil, fmt.Errorf("%w: media type: %s, reference: %s", errs.ErrUnsupportedMediaType, conf.mt, conf.ref.CommonName()) } } tl.tagCommon = tc @@ -96,35 +108,42 @@ func New(opts ...Opts) (*List, error) { return &tl, nil } -// WithHeaders includes data from http headers when creating tag list +// WithHeaders includes data from http headers when creating tag list. func WithHeaders(header http.Header) Opts { return func(tConf *tagConfig) { tConf.header = header } } -// WithMT sets the returned media type on the tag list +// WithLayoutIndex include the index from an OCI Layout. +func WithLayoutIndex(index ociv1.Index) Opts { + return func(tConf *tagConfig) { + tConf.index = index + } +} + +// WithMT sets the returned media type on the tag list. func WithMT(mt string) Opts { return func(tConf *tagConfig) { tConf.mt = mt } } -// WithRaw defines the raw response from the tag list request +// WithRaw defines the raw response from the tag list request. func WithRaw(raw []byte) Opts { return func(tConf *tagConfig) { tConf.raw = raw } } -// WithRef specifies the reference (repository) associated with the tag list +// WithRef specifies the reference (repository) associated with the tag list. func WithRef(ref ref.Ref) Opts { return func(tConf *tagConfig) { tConf.ref = ref } } -// WithResp includes the response from an http request +// WithResp includes the response from an http request. func WithResp(resp *http.Response) Opts { return func(tConf *tagConfig) { if len(tConf.raw) == 0 { @@ -145,14 +164,14 @@ func WithResp(resp *http.Response) Opts { } } -// WithTags provides the parsed tags for the tag list +// WithTags provides the parsed tags for the tag list. func WithTags(tags []string) Opts { return func(tConf *tagConfig) { tConf.tags = tags } } -// Append extends a tag list with another +// Append extends a tag list with another. func (l *List) Append(add *List) error { // verify two lists are compatible if l.mt != add.mt || !ref.EqualRepository(l.r, add.r) || l.Name != add.Name { @@ -186,12 +205,12 @@ func (l *List) Append(add *List) error { return nil } -// GetOrig returns the underlying tag data structure if defined +// GetOrig returns the underlying tag data structure if defined. func (t tagCommon) GetOrig() interface{} { return t.orig } -// MarshalJSON returns the tag list in json +// MarshalJSON returns the tag list in json. 
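The functional options above combine as in the following hedged sketch; the repository name and raw JSON body are invented, and the `application/json` branch of `New` does the parsing:

```go
package main

import (
	"fmt"

	"github.com/regclient/regclient/types/ref"
	"github.com/regclient/regclient/types/tag"
)

func main() {
	r, _ := ref.New("registry.example.com/team/app")

	// A raw registry/2.0 tag-list response body.
	raw := []byte(`{"name":"team/app","tags":["v1","v2","latest"]}`)

	tl, err := tag.New(
		tag.WithRef(r),
		tag.WithMT("application/json"),
		tag.WithRaw(raw),
	)
	if err != nil {
		panic(err)
	}
	tags, _ := tl.GetTags()
	fmt.Println(tags) // [v1 v2 latest]
}
```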
func (t tagCommon) MarshalJSON() ([]byte, error) { if len(t.rawBody) > 0 { return t.rawBody, nil @@ -200,30 +219,30 @@ func (t tagCommon) MarshalJSON() ([]byte, error) { if t.orig != nil { return json.Marshal((t.orig)) } - return []byte{}, fmt.Errorf("JSON marshalling failed: %w", types.ErrNotFound) + return []byte{}, fmt.Errorf("JSON marshalling failed: %w", errs.ErrNotFound) } -// RawBody returns the original tag list response +// RawBody returns the original tag list response. func (t tagCommon) RawBody() ([]byte, error) { return t.rawBody, nil } -// RawHeaders returns the received http headers +// RawHeaders returns the received http headers. func (t tagCommon) RawHeaders() (http.Header, error) { return t.rawHeader, nil } -// GetURL returns the URL of the request +// GetURL returns the URL of the request. func (t tagCommon) GetURL() *url.URL { return t.url } -// GetTags returns the tags from a list +// GetTags returns the tags from a list. func (tl DockerList) GetTags() ([]string, error) { return tl.Tags, nil } -// MarshalPretty is used for printPretty template formatting +// MarshalPretty is used for printPretty template formatting. func (tl DockerList) MarshalPretty() ([]byte, error) { sort.Slice(tl.Tags, func(i, j int) bool { return strings.Compare(tl.Tags[i], tl.Tags[j]) < 0 diff --git a/vendor/github.com/regclient/regclient/types/warning/warning.go b/vendor/github.com/regclient/regclient/types/warning/warning.go index 03332f838..9612fc727 100644 --- a/vendor/github.com/regclient/regclient/types/warning/warning.go +++ b/vendor/github.com/regclient/regclient/types/warning/warning.go @@ -77,6 +77,6 @@ func Handle(ctx context.Context, log *logrus.Logger, msg string) { func logMsg(log *logrus.Logger, msg string) { log.WithFields(logrus.Fields{ - msg: msg, + "warning": msg, }).Warn("Registry warning message") } diff --git a/vendor/github.com/ulikunitz/xz/.gitignore b/vendor/github.com/ulikunitz/xz/.gitignore new file mode 100644 index 000000000..eb3d5f517 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/.gitignore @@ -0,0 +1,28 @@ +# .gitignore + +TODO.html +README.html + +lzma/writer.txt +lzma/reader.txt + +cmd/gxz/gxz +cmd/xb/xb + +# test executables +*.test + +# profile files +*.out + +# vim swap file +.*.swp + +# executables on windows +*.exe + +# default compression test file +enwik8* + +# file generated by example +example.xz \ No newline at end of file diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE new file mode 100644 index 000000000..8a7f0877d --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2014-2022 Ulrich Kunitz +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* My name, Ulrich Kunitz, may not be used to endorse or promote products + derived from this software without specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md
new file mode 100644
index 000000000..554718521
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/README.md
@@ -0,0 +1,77 @@
+# Package xz
+
+This Go language package supports the reading and writing of xz
+compressed streams. It also includes a gxz command for compressing and
+decompressing data. The package is completely written in Go and doesn't
+have any dependency on any C code.
+
+The package is currently under development. There might be bugs and APIs
+are not considered stable. At this time the package cannot compete with
+the xz tool regarding compression speed and size. The algorithms there
+have been developed over a long time and are highly optimized. However
+there are a number of improvements planned and I'm very optimistic about
+parallel compression and decompression. Stay tuned!
+
+## Using the API
+
+The following example program shows how to use the API.
+
+```go
+package main
+
+import (
+	"bytes"
+	"io"
+	"log"
+	"os"
+
+	"github.com/ulikunitz/xz"
+)
+
+func main() {
+	const text = "The quick brown fox jumps over the lazy dog.\n"
+	var buf bytes.Buffer
+	// compress text
+	w, err := xz.NewWriter(&buf)
+	if err != nil {
+		log.Fatalf("xz.NewWriter error %s", err)
+	}
+	if _, err := io.WriteString(w, text); err != nil {
+		log.Fatalf("WriteString error %s", err)
+	}
+	if err := w.Close(); err != nil {
+		log.Fatalf("w.Close error %s", err)
+	}
+	// decompress buffer and write output to stdout
+	r, err := xz.NewReader(&buf)
+	if err != nil {
+		log.Fatalf("NewReader error %s", err)
+	}
+	if _, err = io.Copy(os.Stdout, r); err != nil {
+		log.Fatalf("io.Copy error %s", err)
+	}
+}
+```
+
+## Documentation
+
+You can find the full documentation at [pkg.go.dev](https://pkg.go.dev/github.com/ulikunitz/xz).
+
+## Using the gxz compression tool
+
+The package includes a gxz command line utility for compression and
+decompression.
+
+Use the following command for installation:
+
+    $ go get github.com/ulikunitz/xz/cmd/gxz
+
+To test it, call the following command.
+
+    $ gxz bigfile
+
+After some time a much smaller file bigfile.xz will replace bigfile.
+To decompress it use the following command.
+
+    $ gxz -d bigfile.xz
+
diff --git a/vendor/github.com/ulikunitz/xz/SECURITY.md b/vendor/github.com/ulikunitz/xz/SECURITY.md
new file mode 100644
index 000000000..5f7ec01b3
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/SECURITY.md
@@ -0,0 +1,10 @@
+# Security Policy
+
+## Supported Versions
+
+Currently the last minor version v0.5.x is supported.
+
+## Reporting a Vulnerability
+
+Report a vulnerability by creating a Github issue at
+<https://github.com/ulikunitz/xz/issues>. Expect a response in a week.
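The README example above round-trips a buffer in memory. For the common read-only case, a minimal sketch that streams an existing .xz file to stdout; the file name is a placeholder:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/ulikunitz/xz"
)

func main() {
	// example.xz is a placeholder input file.
	f, err := os.Open("example.xz")
	if err != nil {
		log.Fatalf("os.Open error %s", err)
	}
	defer f.Close()

	// xz.NewReader decodes the stream as it is read.
	r, err := xz.NewReader(f)
	if err != nil {
		log.Fatalf("xz.NewReader error %s", err)
	}
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatalf("io.Copy error %s", err)
	}
}
```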
diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md
new file mode 100644
index 000000000..a3d6f1925
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/TODO.md
@@ -0,0 +1,372 @@
+# TODO list
+
+## Release v0.5.x
+
+1. Support check flag in gxz command.
+
+## Release v0.6
+
+1. Review encoder and check for lzma improvements under xz.
+2. Fix binary tree matcher.
+3. Compare compression ratio with xz tool using comparable parameters and optimize parameters
+4. rename operation action and make it a simple type of size 8
+5. make maxMatches, wordSize parameters
+6. stop searching after a certain length is found (parameter sweetLen)
+
+## Release v0.7
+
+1. Optimize code
+2. Do statistical analysis to get linear presets.
+3. Test sync.Pool compatibility for xz and lzma Writer and Reader
+4. Fuzz optimized code.
+
+## Release v0.8
+
+1. Support parallel go routines for writing and reading xz files.
+2. Support a ReaderAt interface for xz files with small block sizes.
+3. Improve compatibility between gxz and xz
+4. Provide manual page for gxz
+
+## Release v0.9
+
+1. Improve documentation
+2. Fuzz again
+
+## Release v1.0
+
+1. Full functioning gxz
+2. Add godoc URL to README.md (godoc.org)
+3. Resolve all issues.
+4. Define release candidates.
+5. Public announcement.
+
+## Package lzma
+
+### v0.6
+
+* Rewrite Encoder into a simple greedy one-op-at-a-time encoder including
+  * simple scan at the dictionary head for the same byte
+  * use the killer byte (requiring matches to get longer, the first test
+    should be the byte that would make the match longer)
+
+## Optimizations
+
+* There may be a lot of false sharing in lzma.State; check whether this
+  can be improved by reorganizing the internal structure of it.
+
+* Check whether batching encoding and decoding improves speed.
+
+### DAG optimizations
+
+* Use full buffer to create minimal bit-length above range encoder.
+* Might be too slow (see v0.4)
+
+### Different match finders
+
+* hashes with 2, 3 characters additional to 4 characters
+* binary trees with 2-7 characters (uint64 as key, use uint32 as
+  pointers into an array)
+* rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers
+  into an array with bit-stealing for the colors)
+
+## Release Procedure
+
+* execute goch -l for all packages; probably with lower param like 0.5.
+* check orthography with gospell
+* Write release notes in doc/relnotes.
+* Update README.md
+* xb copyright . in xz directory to ensure all new files have Copyright header
+* `VERSION=<version> go generate github.com/ulikunitz/xz/...` to update version files
+* Execute test for Linux/amd64, Linux/x86 and Windows/amd64.
+* Update TODO.md - write short log entry
+* `git checkout master && git merge dev`
+* `git tag -a <version>`
+* `git push`
+
+## Log
+
+### 2022-12-12
+
+Matt Dantay (@bodgit) reported an issue with the LZMA reader. The implementation
+returned an error if the dictionary size was less than 4096 bytes, but the
+recommendation stated the actual used window size should be set to 4096 bytes in
+that case. It actually was the pull request
+[#52](https://github.com/ulikunitz/xz/pull/52). The new patch v0.5.11 will fix
+it.
+
+### 2021-02-02
+
+Mituo Heijo has fuzzed xz and found a bug in the function readIndexBody. The
+function allocated a slice of records immediately after reading the value
+without further checks. Since the number has been too large the make function
+did panic.
+The fix is to check the number against the expected number of records
+before allocating the records.
+
+### 2020-12-17
+
+Release v0.5.9 fixes warnings, a typo and adds SECURITY.md.
+
+One fix is interesting.
+
+```go
+const (
+	a byte = 0x1
+	b      = 0x2
+)
+```
+
+The constants a and b don't have the same type. Correct is
+
+```go
+const (
+	a byte = 0x1
+	b byte = 0x2
+)
+```
+
+### 2020-08-19
+
+Release v0.5.8 fixes
+[issue #35](https://github.com/ulikunitz/xz/issues/35).
+
+### 2020-02-24
+
+Release v0.5.7 supports the check-ID None and fixes
+[issue #27](https://github.com/ulikunitz/xz/issues/27).
+
+### 2019-02-20
+
+Release v0.5.6 supports the go.mod file.
+
+### 2018-10-28
+
+Release v0.5.5 fixes issue #19 about observing ErrLimit outputs.
+
+### 2017-06-05
+
+Release v0.5.4 fixes issue #15, another problem with the padding size
+check for the xz block header. I removed the check completely.
+
+### 2017-02-15
+
+Release v0.5.3 fixes issue #12 regarding the decompression of an empty
+XZ stream. Many thanks to Tomasz Kłak, who reported the issue.
+
+### 2016-12-02
+
+Release v0.5.2 became necessary to allow the decoding of xz files with
+4-byte padding in the block header. Many thanks to Greg, who reported
+the issue.
+
+### 2016-07-23
+
+Release v0.5.1 became necessary to fix problems with 32-bit platforms.
+Many thanks to Bruno Brigas, who reported the issue.
+
+### 2016-07-04
+
+Release v0.5 provides improvements to the compressor and provides support for
+the decompression of xz files with multiple xz streams.
+
+### 2016-01-31
+
+Another compression rate increase by checking the byte at length of the
+best match first, before checking the whole prefix. This makes the
+compressor even faster. We now have a large time budget to beat the
+compression ratio of the xz tool. For enwik8 we now have over 40 seconds
+to reduce the compressed file size by another 7 MiB.
+
+### 2016-01-30
+
+I simplified the encoder. Speed and compression rate increased
+dramatically. A high compression rate also affects the decompression
+speed. The approach with the buffer and optimizing for operation
+compression rate has not been successful. Going for the maximum length
+appears to be the best approach.
+
+### 2016-01-28
+
+The release v0.4 is ready. It provides a working xz implementation,
+which is rather slow, but works and is interoperable with the xz tool.
+It is an important milestone.
+
+### 2016-01-10
+
+I have the first working implementation of an xz reader and writer. I'm
+happy about reaching this milestone.
+
+### 2015-12-02
+
+I'm now ready to implement xz because I have a working LZMA2
+implementation. I decided today that v0.4 will use the slow encoder
+using the operations buffer to be able to go back, if I intend to do so.
+
+### 2015-10-21
+
+I have restarted the work on the library. While trying to implement
+LZMA2, I discovered that I need to resimplify the encoder and decoder
+functions. The option approach is too complicated. Using a limited byte
+writer, not caring for written bytes at all, and not trying to handle
+uncompressed data simplifies the LZMA encoder and decoder much.
+Processing uncompressed data and handling limits is a feature of the
+LZMA2 format, not of LZMA.
+
+I learned an interesting method from the LZO format. If the last copy is
+too far away, they move the head 2 bytes and not 1 byte to reduce
+processing times.
+
+### 2015-08-26
+
+I have now reimplemented the lzma package. The code is reasonably fast,
+but can still be optimized.
+The next step is to implement LZMA2 and then xz.
+
+### 2015-07-05
+
+Created release v0.3. The version is the foundation for a full xz
+implementation that is the target of v0.4.
+
+### 2015-06-11
+
+The gflag package has been developed because I couldn't use flag and
+pflag for a fully compatible support of gzip's and lzma's options. It
+seems to work quite nicely now.
+
+### 2015-06-05
+
+The overflow issue was interesting to research, however Henry S. Warren
+Jr.'s Hacker's Delight book was very helpful as usual and explained the
+issue perfectly. Fefe's information on his website was based on the
+C FAQ and quite bad, because it didn't address the issue of -MININT ==
+MININT.
+
+### 2015-06-04
+
+It has been a productive day. I improved the interface of lzma.Reader
+and lzma.Writer and fixed the error handling.
+
+### 2015-06-01
+
+By computing the bit length of the LZMA operations I was able to
+improve the greedy algorithm implementation. By using an 8 MByte buffer
+the compression rate was not as good as for xz but already better than
+the gzip default.
+
+Compression is currently slow, but this is something we will be able to
+improve over time.
+
+### 2015-05-26
+
+Checked the license of ogier/pflag. The lzmago binary should
+include the license terms for the pflag library.
+
+I added the endorsement clause as used by Google for the Go sources to
+the LICENSE file.
+
+### 2015-05-22
+
+The package lzb now contains the basic implementation for creating or
+reading LZMA byte streams. It allows support for the implementation
+of the DAG-shortest-path algorithm for the compression function.
+
+### 2015-04-23
+
+Completed the lzbase classes yesterday. I'm a little bit concerned that
+using the components may require too much code, but on the other hand
+there is a lot of flexibility.
+
+### 2015-04-22
+
+Implemented Reader and Writer during the Bayern game against Porto. The
+second half gave me enough time.
+
+### 2015-04-21
+
+While showering this morning I discovered that the design for OpEncoder
+and OpDecoder doesn't work, because encoding/decoding might depend on
+the current status of the dictionary. This is not exactly the right way
+to start the day.
+
+Therefore we need to keep the Reader and Writer design. This time around
+we simplify it by ignoring size limits. These can be added by wrappers
+around the Reader and Writer interfaces. The Parameters type isn't
+needed anymore.
+
+However I will implement a ReaderState and WriterState type to use
+static typing to ensure the right State object is combined with the
+right lzbase.Reader and lzbase.Writer.
+
+As a start I have implemented ReaderState and WriterState to ensure
+that the state for reading is only used by readers and WriterState only
+used by Writers.
+
+### 2015-04-20
+
+Today I implemented the OpDecoder and tested OpEncoder and OpDecoder.
+
+### 2015-04-08
+
+Came up with a new simplified design for lzbase. I already implemented
+the type State that replaces OpCodec.
+
+### 2015-04-06
+
+The new lzma package is now fully usable and lzmago now uses it. The
+old lzma package has been completely removed.
+
+### 2015-04-05
+
+Implemented lzma.Reader and tested it.
+
+### 2015-04-04
+
+Implemented baseReader by adapting code from lzma.Reader.
+
+### 2015-04-03
+
+The opCodec has been copied yesterday to lzma2. opCodec has a high
+number of dependencies on other files in lzma2. Therefore I had to copy
+almost all files from lzma.
+
+### 2015-03-31
+
+Removed only a TODO item.
+
+However, Francesco Campoy's presentation "Go for Javaneros
+(Javaïstes?)" mentions the idea that, by using an embedded field E, all
+the methods of E will be defined on T. If E is an interface, T
+satisfies E.
+
+I have never used this, but it seems to be a cool idea.
+
+### 2015-03-30
+
+Finished the type writerDict and wrote a simple test.
+
+### 2015-03-25
+
+I started to implement the writerDict.
+
+### 2015-03-24
+
+After thinking long about the LZMA2 code and several false starts, I
+now have a plan to create a self-sufficient lzma2 package that supports
+the classic LZMA format as well as LZMA2. The core idea is to support a
+baseReader and baseWriter type that support the basic LZMA stream
+without any headers. Both types must support the reuse of dictionaries
+and the opCodec.
+
+### 2015-01-10
+
+1. Implemented simple lzmago tool
+2. Tested tool against large 4.4G file
+   * compression worked correctly; tested decompression with lzma
+   * decompression hits a full buffer condition
+3. Fixed a bug in the compressor and wrote a test for it
+4. Executed full cycle for 4.4 GB file; performance can be improved ;-)
+
+### 2015-01-11
+
+* Release v0.2 because of the working LZMA encoder and decoder
diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go
new file mode 100644
index 000000000..b30f1ec97
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/bits.go
@@ -0,0 +1,79 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"io"
+)
+
+// putUint32LE puts the little-endian representation of x into the first
+// four bytes of p.
+func putUint32LE(p []byte, x uint32) {
+	p[0] = byte(x)
+	p[1] = byte(x >> 8)
+	p[2] = byte(x >> 16)
+	p[3] = byte(x >> 24)
+}
+
+// putUint64LE puts the little-endian representation of x into the first
+// eight bytes of p.
+func putUint64LE(p []byte, x uint64) {
+	p[0] = byte(x)
+	p[1] = byte(x >> 8)
+	p[2] = byte(x >> 16)
+	p[3] = byte(x >> 24)
+	p[4] = byte(x >> 32)
+	p[5] = byte(x >> 40)
+	p[6] = byte(x >> 48)
+	p[7] = byte(x >> 56)
+}
+
+// uint32LE converts a little endian representation to an uint32 value.
+func uint32LE(p []byte) uint32 {
+	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 |
+		uint32(p[3])<<24
+}
+
+// putUvarint puts a uvarint representation of x into the byte slice.
+func putUvarint(p []byte, x uint64) int {
+	i := 0
+	for x >= 0x80 {
+		p[i] = byte(x) | 0x80
+		x >>= 7
+		i++
+	}
+	p[i] = byte(x)
+	return i + 1
+}
+
+// errOverflow indicates an overflow of the 64-bit unsigned integer.
+var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer")
+
+// readUvarint reads a uvarint from the given byte reader.
+func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
+	const maxUvarintLen = 10
+
+	var s uint
+	i := 0
+	for {
+		b, err := r.ReadByte()
+		if err != nil {
+			return x, i, err
+		}
+		i++
+		if i > maxUvarintLen {
+			return x, i, errOverflowU64
+		}
+		if b < 0x80 {
+			if i == maxUvarintLen && b > 1 {
+				return x, i, errOverflowU64
+			}
+			return x | uint64(b)<<s, i, nil
+		}
+		x |= uint64(b&0x7f) << s
+		s += 7
+	}
+}
diff --git a/vendor/github.com/ulikunitz/xz/format.go b/vendor/github.com/ulikunitz/xz/format.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/format.go
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"hash"
+	"hash/crc32"
+	"io"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// allZeros checks whether the given byte slice contains only zeros.
+func allZeros(p []byte) bool {
+	for _, c := range p {
+		if c != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// padLen returns the length of the padding required for the given
+// argument.
+func padLen(n int64) int {
+	k := int(n % 4)
+	if k > 0 {
+		k = 4 - k
+	}
+	return k
+}
+
+/*** Header ***/
+
+// headerMagic stores the magic bytes for the header
+var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}
+
+// HeaderLen provides the length of the xz file header.
+const HeaderLen = 12
+
+// Constants for the checksum methods supported by xz.
+const ( + None byte = 0x0 + CRC32 byte = 0x1 + CRC64 byte = 0x4 + SHA256 byte = 0xa +) + +// errInvalidFlags indicates that flags are invalid. +var errInvalidFlags = errors.New("xz: invalid flags") + +// verifyFlags returns the error errInvalidFlags if the value is +// invalid. +func verifyFlags(flags byte) error { + switch flags { + case None, CRC32, CRC64, SHA256: + return nil + default: + return errInvalidFlags + } +} + +// flagstrings maps flag values to strings. +var flagstrings = map[byte]string{ + None: "None", + CRC32: "CRC-32", + CRC64: "CRC-64", + SHA256: "SHA-256", +} + +// flagString returns the string representation for the given flags. +func flagString(flags byte) string { + s, ok := flagstrings[flags] + if !ok { + return "invalid" + } + return s +} + +// newHashFunc returns a function that creates hash instances for the +// hash method encoded in flags. +func newHashFunc(flags byte) (newHash func() hash.Hash, err error) { + switch flags { + case None: + newHash = newNoneHash + case CRC32: + newHash = newCRC32 + case CRC64: + newHash = newCRC64 + case SHA256: + newHash = sha256.New + default: + err = errInvalidFlags + } + return +} + +// header provides the actual content of the xz file header: the flags. +type header struct { + flags byte +} + +// Errors returned by readHeader. +var errHeaderMagic = errors.New("xz: invalid header magic bytes") + +// ValidHeader checks whether data is a correct xz file header. The +// length of data must be HeaderLen. +func ValidHeader(data []byte) bool { + var h header + err := h.UnmarshalBinary(data) + return err == nil +} + +// String returns a string representation of the flags. +func (h header) String() string { + return flagString(h.flags) +} + +// UnmarshalBinary reads header from the provided data slice. +func (h *header) UnmarshalBinary(data []byte) error { + // header length + if len(data) != HeaderLen { + return errors.New("xz: wrong file header length") + } + + // magic header + if !bytes.Equal(headerMagic, data[:6]) { + return errHeaderMagic + } + + // checksum + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + if uint32LE(data[8:]) != crc.Sum32() { + return errors.New("xz: invalid checksum for file header") + } + + // stream flags + if data[6] != 0 { + return errInvalidFlags + } + flags := data[7] + if err := verifyFlags(flags); err != nil { + return err + } + + h.flags = flags + return nil +} + +// MarshalBinary generates the xz file header. +func (h *header) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(h.flags); err != nil { + return nil, err + } + + data = make([]byte, 12) + copy(data, headerMagic) + data[7] = h.flags + + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + putUint32LE(data[8:], crc.Sum32()) + + return data, nil +} + +/*** Footer ***/ + +// footerLen defines the length of the footer. +const footerLen = 12 + +// footerMagic contains the footer magic bytes. +var footerMagic = []byte{'Y', 'Z'} + +// footer represents the content of the xz file footer. +type footer struct { + indexSize int64 + flags byte +} + +// String prints a string representation of the footer structure. +func (f footer) String() string { + return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize) +} + +// Minimum and maximum for the size of the index (backward size). +const ( + minIndexSize = 4 + maxIndexSize = (1 << 32) * 4 +) + +// MarshalBinary converts footer values into an xz file footer. Note +// that the footer value is checked for correctness. 
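`HeaderLen`, `ValidHeader`, and the check-method constants above are the exported pieces of this file, so the 12-byte header check can be used on its own to sniff files. A small sketch, with the path again a placeholder:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/ulikunitz/xz"
)

func main() {
	// example.xz is a placeholder path.
	f, err := os.Open("example.xz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// An xz stream starts with a HeaderLen (12) byte header: 6 magic
	// bytes, 2 flag bytes, and a 4-byte CRC-32 of the flags, exactly
	// as marshalled above.
	hdr := make([]byte, xz.HeaderLen)
	if _, err := io.ReadFull(f, hdr); err != nil {
		log.Fatal(err)
	}
	fmt.Println("valid xz header:", xz.ValidHeader(hdr))
}
```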
+func (f *footer) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(f.flags); err != nil { + return nil, err + } + if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) { + return nil, errors.New("xz: index size out of range") + } + if f.indexSize%4 != 0 { + return nil, errors.New( + "xz: index size not aligned to four bytes") + } + + data = make([]byte, footerLen) + + // backward size (index size) + s := (f.indexSize / 4) - 1 + putUint32LE(data[4:], uint32(s)) + // flags + data[9] = f.flags + // footer magic + copy(data[10:], footerMagic) + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + putUint32LE(data, crc.Sum32()) + + return data, nil +} + +// UnmarshalBinary sets the footer value by unmarshalling an xz file +// footer. +func (f *footer) UnmarshalBinary(data []byte) error { + if len(data) != footerLen { + return errors.New("xz: wrong footer length") + } + + // magic bytes + if !bytes.Equal(data[10:], footerMagic) { + return errors.New("xz: footer magic invalid") + } + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + if uint32LE(data) != crc.Sum32() { + return errors.New("xz: footer checksum error") + } + + var g footer + // backward size (index size) + g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4 + + // flags + if data[8] != 0 { + return errInvalidFlags + } + g.flags = data[9] + if err := verifyFlags(g.flags); err != nil { + return err + } + + *f = g + return nil +} + +/*** Block Header ***/ + +// blockHeader represents the content of an xz block header. +type blockHeader struct { + compressedSize int64 + uncompressedSize int64 + filters []filter +} + +// String converts the block header into a string. +func (h blockHeader) String() string { + var buf bytes.Buffer + first := true + if h.compressedSize >= 0 { + fmt.Fprintf(&buf, "compressed size %d", h.compressedSize) + first = false + } + if h.uncompressedSize >= 0 { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize) + first = false + } + for _, f := range h.filters { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "filter %s", f) + first = false + } + return buf.String() +} + +// Masks for the block flags. +const ( + filterCountMask = 0x03 + compressedSizePresent = 0x40 + uncompressedSizePresent = 0x80 + reservedBlockFlags = 0x3C +) + +// errIndexIndicator signals that an index indicator (0x00) has been found +// instead of an expected block header indicator. +var errIndexIndicator = errors.New("xz: found index indicator") + +// readBlockHeader reads the block header. +func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) { + var buf bytes.Buffer + buf.Grow(20) + + // block header size + z, err := io.CopyN(&buf, r, 1) + n = int(z) + if err != nil { + return nil, n, err + } + s := buf.Bytes()[0] + if s == 0 { + return nil, n, errIndexIndicator + } + + // read complete header + headerLen := (int(s) + 1) * 4 + buf.Grow(headerLen - 1) + z, err = io.CopyN(&buf, r, int64(headerLen-1)) + n += int(z) + if err != nil { + return nil, n, err + } + + // unmarshal block header + h = new(blockHeader) + if err = h.UnmarshalBinary(buf.Bytes()); err != nil { + return nil, n, err + } + + return h, n, nil +} + +// readSizeInBlockHeader reads the uncompressed or compressed size +// fields in the block header. The present value informs the function +// whether the respective field is actually present in the header. 
+func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) { + if !present { + return -1, nil + } + x, _, err := readUvarint(r) + if err != nil { + return 0, err + } + if x >= 1<<63 { + return 0, errors.New("xz: size overflow in block header") + } + return int64(x), nil +} + +// UnmarshalBinary unmarshals the block header. +func (h *blockHeader) UnmarshalBinary(data []byte) error { + // Check header length + s := data[0] + if data[0] == 0 { + return errIndexIndicator + } + headerLen := (int(s) + 1) * 4 + if len(data) != headerLen { + return fmt.Errorf("xz: data length %d; want %d", len(data), + headerLen) + } + n := headerLen - 4 + + // Check CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[:n]) + if crc.Sum32() != uint32LE(data[n:]) { + return errors.New("xz: checksum error for block header") + } + + // Block header flags + flags := data[1] + if flags&reservedBlockFlags != 0 { + return errors.New("xz: reserved block header flags set") + } + + r := bytes.NewReader(data[2:n]) + + // Compressed size + var err error + h.compressedSize, err = readSizeInBlockHeader( + r, flags&compressedSizePresent != 0) + if err != nil { + return err + } + + // Uncompressed size + h.uncompressedSize, err = readSizeInBlockHeader( + r, flags&uncompressedSizePresent != 0) + if err != nil { + return err + } + + h.filters, err = readFilters(r, int(flags&filterCountMask)+1) + if err != nil { + return err + } + + // Check padding + // Since headerLen is a multiple of 4 we don't need to check + // alignment. + k := r.Len() + // The standard spec says that the padding should have not more + // than 3 bytes. However we found paddings of 4 or 5 in the + // wild. See https://github.com/ulikunitz/xz/pull/11 and + // https://github.com/ulikunitz/xz/issues/15 + // + // The only reasonable approach seems to be to ignore the + // padding size. We still check that all padding bytes are zero. + if !allZeros(data[n-k : n]) { + return errPadding + } + return nil +} + +// MarshalBinary marshals the binary header. 
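To make the flag handling in `UnmarshalBinary` above concrete, here is a self-contained sketch that decodes a block-header flags byte the same way; the mask constants are copied from this file because they are unexported:

```go
package main

import "fmt"

// Masks copied from the vendored format.go above.
const (
	filterCountMask         = 0x03
	compressedSizePresent   = 0x40
	uncompressedSizePresent = 0x80
	reservedBlockFlags      = 0x3C
)

func main() {
	// Example flags byte: filter-count bits 00 (one filter) with both
	// optional size fields present.
	flags := byte(compressedSizePresent | uncompressedSizePresent)

	if flags&reservedBlockFlags != 0 {
		fmt.Println("reserved block header flags set")
		return
	}
	fmt.Println("filters:", int(flags&filterCountMask)+1)                         // 1
	fmt.Println("compressed size present:", flags&compressedSizePresent != 0)     // true
	fmt.Println("uncompressed size present:", flags&uncompressedSizePresent != 0) // true
}
```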
+func (h *blockHeader) MarshalBinary() (data []byte, err error) { + if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) { + return nil, errors.New("xz: filter count wrong") + } + for i, f := range h.filters { + if i < len(h.filters)-1 { + if f.id() == lzmaFilterID { + return nil, errors.New( + "xz: LZMA2 filter is not the last") + } + } else { + // last filter + if f.id() != lzmaFilterID { + return nil, errors.New("xz: " + + "last filter must be the LZMA2 filter") + } + } + } + + var buf bytes.Buffer + // header size must set at the end + buf.WriteByte(0) + + // flags + flags := byte(len(h.filters) - 1) + if h.compressedSize >= 0 { + flags |= compressedSizePresent + } + if h.uncompressedSize >= 0 { + flags |= uncompressedSizePresent + } + buf.WriteByte(flags) + + p := make([]byte, 10) + if h.compressedSize >= 0 { + k := putUvarint(p, uint64(h.compressedSize)) + buf.Write(p[:k]) + } + if h.uncompressedSize >= 0 { + k := putUvarint(p, uint64(h.uncompressedSize)) + buf.Write(p[:k]) + } + + for _, f := range h.filters { + fp, err := f.MarshalBinary() + if err != nil { + return nil, err + } + buf.Write(fp) + } + + // padding + for i := padLen(int64(buf.Len())); i > 0; i-- { + buf.WriteByte(0) + } + + // crc place holder + buf.Write(p[:4]) + + data = buf.Bytes() + if len(data)%4 != 0 { + panic("data length not aligned") + } + s := len(data)/4 - 1 + if !(1 < s && s <= 255) { + panic("wrong block header size") + } + data[0] = byte(s) + + crc := crc32.NewIEEE() + crc.Write(data[:len(data)-4]) + putUint32LE(data[len(data)-4:], crc.Sum32()) + + return data, nil +} + +// Constants used for marshalling and unmarshalling filters in the xz +// block header. +const ( + minFilters = 1 + maxFilters = 4 + minReservedID = 1 << 62 +) + +// filter represents a filter in the block header. +type filter interface { + id() uint64 + UnmarshalBinary(data []byte) error + MarshalBinary() (data []byte, err error) + reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error) + writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error) + // filter must be last filter + last() bool +} + +// readFilter reads a block filter from the block header. At this point +// in time only the LZMA2 filter is supported. +func readFilter(r io.Reader) (f filter, err error) { + br := lzma.ByteReader(r) + + // index + id, _, err := readUvarint(br) + if err != nil { + return nil, err + } + + var data []byte + switch id { + case lzmaFilterID: + data = make([]byte, lzmaFilterLen) + data[0] = lzmaFilterID + if _, err = io.ReadFull(r, data[1:]); err != nil { + return nil, err + } + f = new(lzmaFilter) + default: + if id >= minReservedID { + return nil, errors.New( + "xz: reserved filter id in block stream header") + } + return nil, errors.New("xz: invalid filter id") + } + if err = f.UnmarshalBinary(data); err != nil { + return nil, err + } + return f, err +} + +// readFilters reads count filters. At this point in time only the count +// 1 is supported. +func readFilters(r io.Reader, count int) (filters []filter, err error) { + if count != 1 { + return nil, errors.New("xz: unsupported filter count") + } + f, err := readFilter(r) + if err != nil { + return nil, err + } + return []filter{f}, err +} + +/*** Index ***/ + +// record describes a block in the xz file index. +type record struct { + unpaddedSize int64 + uncompressedSize int64 +} + +// readRecord reads an index record. 
+func readRecord(r io.ByteReader) (rec record, n int, err error) { + u, k, err := readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.unpaddedSize = int64(u) + if rec.unpaddedSize < 0 { + return rec, n, errors.New("xz: unpadded size negative") + } + + u, k, err = readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.uncompressedSize = int64(u) + if rec.uncompressedSize < 0 { + return rec, n, errors.New("xz: uncompressed size negative") + } + + return rec, n, nil +} + +// MarshalBinary converts an index record in its binary encoding. +func (rec *record) MarshalBinary() (data []byte, err error) { + // maximum length of a uvarint is 10 + p := make([]byte, 20) + n := putUvarint(p, uint64(rec.unpaddedSize)) + n += putUvarint(p[n:], uint64(rec.uncompressedSize)) + return p[:n], nil +} + +// writeIndex writes the index, a sequence of records. +func writeIndex(w io.Writer, index []record) (n int64, err error) { + crc := crc32.NewIEEE() + mw := io.MultiWriter(w, crc) + + // index indicator + k, err := mw.Write([]byte{0}) + n += int64(k) + if err != nil { + return n, err + } + + // number of records + p := make([]byte, 10) + k = putUvarint(p, uint64(len(index))) + k, err = mw.Write(p[:k]) + n += int64(k) + if err != nil { + return n, err + } + + // list of records + for _, rec := range index { + p, err := rec.MarshalBinary() + if err != nil { + return n, err + } + k, err = mw.Write(p) + n += int64(k) + if err != nil { + return n, err + } + } + + // index padding + k, err = mw.Write(make([]byte, padLen(int64(n)))) + n += int64(k) + if err != nil { + return n, err + } + + // crc32 checksum + putUint32LE(p, crc.Sum32()) + k, err = w.Write(p[:4]) + n += int64(k) + + return n, err +} + +// readIndexBody reads the index from the reader. It assumes that the +// index indicator has already been read. 
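An index record above is just two uvarints back to back, and `putUvarint`/`readUvarint` use the same wire format as the standard library's varint routines (7-bit groups with a continuation bit), so a record round-trip can be sketched with `encoding/binary` alone; the sizes are made-up values:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// A record is the unpadded size followed by the uncompressed size,
	// each encoded as a uvarint of at most 10 bytes.
	unpaddedSize, uncompressedSize := uint64(4096), uint64(16384)

	p := make([]byte, 20)
	n := binary.PutUvarint(p, unpaddedSize)
	n += binary.PutUvarint(p[n:], uncompressedSize)

	r := bytes.NewReader(p[:n])
	u1, _ := binary.ReadUvarint(r)
	u2, _ := binary.ReadUvarint(r)
	fmt.Println(u1, u2) // 4096 16384
}
```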
+func readIndexBody(r io.Reader, expectedRecordLen int) (records []record, n int64, err error) { + crc := crc32.NewIEEE() + // index indicator + crc.Write([]byte{0}) + + br := lzma.ByteReader(io.TeeReader(r, crc)) + + // number of records + u, k, err := readUvarint(br) + n += int64(k) + if err != nil { + return nil, n, err + } + recLen := int(u) + if recLen < 0 || uint64(recLen) != u { + return nil, n, errors.New("xz: record number overflow") + } + if recLen != expectedRecordLen { + return nil, n, fmt.Errorf( + "xz: index length is %d; want %d", + recLen, expectedRecordLen) + } + + // list of records + records = make([]record, recLen) + for i := range records { + records[i], k, err = readRecord(br) + n += int64(k) + if err != nil { + return nil, n, err + } + } + + p := make([]byte, padLen(int64(n+1)), 4) + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return nil, n, err + } + if !allZeros(p) { + return nil, n, errors.New("xz: non-zero byte in index padding") + } + + // crc32 + s := crc.Sum32() + p = p[:4] + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return records, n, err + } + if uint32LE(p) != s { + return nil, n, errors.New("xz: wrong checksum for index") + } + + return records, n, nil +} diff --git a/vendor/github.com/ulikunitz/xz/fox-check-none.xz b/vendor/github.com/ulikunitz/xz/fox-check-none.xz new file mode 100644 index 0000000000000000000000000000000000000000..46043f7dc89b610dc3badb9db3426620c4c97462 GIT binary patch literal 96 zcmexsUKJ6=z`*cd=%ynRgCe6CkX@qxbTK1?PDnLRM*R tL9s%9S!$6&2~avGv8qxbB|lw{3#g5Ofzej?!NQIFY(?{`7{LOOQ2>-O93KDx literal 0 HcmV?d00001 diff --git a/vendor/github.com/ulikunitz/xz/fox.xz b/vendor/github.com/ulikunitz/xz/fox.xz new file mode 100644 index 0000000000000000000000000000000000000000..4b820bd5a16e83fe5db4fb315639a4337f862483 GIT binary patch literal 104 zcmexsUKJ6=z`*kC+7>q^21Q0O1_p)_{ill=8FWH2QWXkIGn2Cwl8W-n^AytZD-^Oy za|?dFO$zmVVdxt0+m!4eq- E0K@hlng9R* literal 0 HcmV?d00001 diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go new file mode 100644 index 000000000..dae159db5 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go @@ -0,0 +1,181 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// CyclicPoly provides a cyclic polynomial rolling hash. +type CyclicPoly struct { + h uint64 + p []uint64 + i int +} + +// ror rotates the unsigned 64-bit integer to right. The argument s must be +// less than 64. +func ror(x uint64, s uint) uint64 { + return (x >> s) | (x << (64 - s)) +} + +// NewCyclicPoly creates a new instance of the CyclicPoly structure. The +// argument n gives the number of bytes for which a hash will be executed. +// This number must be positive; the method panics if this isn't the case. +func NewCyclicPoly(n int) *CyclicPoly { + if n < 1 { + panic("argument n must be positive") + } + return &CyclicPoly{p: make([]uint64, 0, n)} +} + +// Len returns the length of the byte sequence for which a hash is generated. +func (r *CyclicPoly) Len() int { + return cap(r.p) +} + +// RollByte hashes the next byte and returns a hash value. The complete becomes +// available after at least Len() bytes have been hashed. 
+func (r *CyclicPoly) RollByte(x byte) uint64 { + y := hash[x] + if len(r.p) < cap(r.p) { + r.h = ror(r.h, 1) ^ y + r.p = append(r.p, y) + } else { + r.h ^= ror(r.p[r.i], uint(cap(r.p)-1)) + r.h = ror(r.h, 1) ^ y + r.p[r.i] = y + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} + +// Stores the hash for the individual bytes. +var hash = [256]uint64{ + 0x2e4fc3f904065142, 0xc790984cfbc99527, + 0x879f95eb8c62f187, 0x3b61be86b5021ef2, + 0x65a896a04196f0a5, 0xc5b307b80470b59e, + 0xd3bff376a70df14b, 0xc332f04f0b3f1701, + 0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53, + 0x1906a10c2c1c0208, 0xfb0c712a03421c0d, + 0x38be311a65c9552b, 0xfee7ee4ca6445c7e, + 0x71aadeded184f21e, 0xd73426fccda23b2d, + 0x29773fb5fb9600b5, 0xce410261cd32981a, + 0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c, + 0xc13e35fc9c73a887, 0xf30ed5c201e76dbc, + 0xa5f10b3910482cea, 0x2945d59be02dfaad, + 0x06ee334ff70571b5, 0xbabf9d8070f44380, + 0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7, + 0x26183cb9f7b1664c, 0xea71dac7da068f21, + 0xea92eca5bd1d0bb7, 0x415595862defcd75, + 0x248a386023c60648, 0x9cf021ab284b3c8a, + 0xfc9372df02870f6c, 0x2b92d693eeb3b3fc, + 0x73e799d139dc6975, 0x7b15ae312486363c, + 0xb70e5454a2239c80, 0x208e3fb31d3b2263, + 0x01f563cabb930f44, 0x2ac4533d2a3240d8, + 0x84231ed1064f6f7c, 0xa9f020977c2a6d19, + 0x213c227271c20122, 0x09fe8a9a0a03d07a, + 0x4236dc75bcaf910c, 0x460a8b2bead8f17e, + 0xd9b27be1aa07055f, 0xd202d5dc4b11c33e, + 0x70adb010543bea12, 0xcdae938f7ea6f579, + 0x3f3d870208672f4d, 0x8e6ccbce9d349536, + 0xe4c0871a389095ae, 0xf5f2a49152bca080, + 0x9a43f9b97269934e, 0xc17b3753cb6f475c, + 0xd56d941e8e206bd4, 0xac0a4f3e525eda00, + 0xa06d5a011912a550, 0x5537ed19537ad1df, + 0xa32fe713d611449d, 0x2a1d05b47c3b579f, + 0x991d02dbd30a2a52, 0x39e91e7e28f93eb0, + 0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97, + 0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44, + 0x0b63d5d801708420, 0x8f227ca8f37ffaec, + 0x0256278670887c24, 0x107e14877dbf540b, + 0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61, + 0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001, + 0x31f601d5d31c48c4, 0x72ff3c0928bcaec7, + 0xd99264421147eb03, 0x535a2d6d38aefcfe, + 0x6ba8b4454a916237, 0xfa39366eaae4719c, + 0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4, + 0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8, + 0xd61c2503fe639144, 0x30ce625441eb92d3, + 0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5, + 0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf, + 0xc7ea4872c96b83ae, 0x6dd5d376f4392382, + 0x1be88681aaa9792f, 0xfef465ee1b6c10d9, + 0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9, + 0x7808e902b3857d0b, 0x171c9c4ea4607972, + 0x58d66274850146df, 0x42b311c10d3981d1, + 0x647fa8c621c41a4c, 0xf472771c66ddfedc, + 0x338d27e3f847b46b, 0x6402ce3da97545ce, + 0x5162db616fc38638, 0x9c83be97bc22a50e, + 0x2d3d7478a78d5e72, 0xe621a9b938fd5397, + 0x9454614eb0f81c45, 0x395fb6e742ed39b6, + 0x77dd9179d06037bf, 0xc478d0fee4d2656d, + 0x35d9d6cb772007af, 0x83a56e92c883f0f6, + 0x27937453250c00a1, 0x27bd6ebc3a46a97d, + 0x9f543bf784342d51, 0xd158f38c48b0ed52, + 0x8dd8537c045f66b4, 0x846a57230226f6d5, + 0x6b13939e0c4e7cdf, 0xfca25425d8176758, + 0x92e5fc6cd52788e6, 0x9992e13d7a739170, + 0x518246f7a199e8ea, 0xf104c2a71b9979c7, + 0x86b3ffaabea4768f, 0x6388061cf3e351ad, + 0x09d9b5295de5bbb5, 0x38bf1638c2599e92, + 0x1d759846499e148d, 0x4c0ff015e5f96ef4, + 0xa41a94cfa270f565, 0x42d76f9cb2326c0b, + 0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a, + 0x337523aabbe6cf8d, 0x646bb14001d42b12, + 0xc178729d138adc74, 0xf900ef4491f24086, + 0xee1a90d334bb5ac4, 0x9755c92247301a50, + 0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9, + 0x0fa8084cf91ac6ff, 0x10d226cf136e6189, + 0xd302057a07d4fb21, 0x5f03800e20a0fcc3, + 
0x80118d4ae46bd210, 0x58ab61a522843733, + 0x51edd575c5432a4b, 0x94ee6ff67f9197f7, + 0x765669e0e5e8157b, 0xa5347830737132f0, + 0x3ba485a69f01510c, 0x0b247d7b957a01c3, + 0x1b3d63449fd807dc, 0x0fdc4721c30ad743, + 0x8b535ed3829b2b14, 0xee41d0cad65d232c, + 0xe6a99ed97a6a982f, 0x65ac6194c202003d, + 0x692accf3a70573eb, 0xcc3c02c3e200d5af, + 0x0d419e8b325914a3, 0x320f160f42c25e40, + 0x00710d647a51fe7a, 0x3c947692330aed60, + 0x9288aa280d355a7a, 0xa1806a9b791d1696, + 0x5d60e38496763da1, 0x6c69e22e613fd0f4, + 0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba, + 0x460c17992cbaece1, 0xf7822c5444d3297f, + 0x344a9790c69b74aa, 0xb80a42e6cae09dce, + 0x1b1361eaf2b1e757, 0xd84c1e758e236f01, + 0x88e0b7be347627cc, 0x45246009b7a99490, + 0x8011c6dd3fe50472, 0xc341d682bffb99d7, + 0x2511be93808e2d15, 0xd5bc13d7fd739840, + 0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157, + 0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0, + 0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc, + 0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e, + 0xa559cce0d9199aac, 0xde39d47ef3723380, + 0xe5b69d848ce42e35, 0xefa24296f8e79f52, + 0x70190b59db9a5afc, 0x26f166cdb211e7bf, + 0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017, + 0xb9059b05e9420d90, 0x2f0da855c9388754, + 0x611d5e9ab77949cc, 0x2912038ac01163f4, + 0x0231df50402b2fba, 0x45660fc4f3245f58, + 0xb91cc97c7c8dac50, 0xb72d2aafe4953427, + 0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2, + 0x1310e1c1a48d21c3, 0xad48a7810cdd8544, + 0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de, + 0xe70cfc8fe1ee9626, 0xef4711b0d8dda442, + 0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93, + 0x9b37db9d0335a39c, 0x494b6f870f5cfebc, + 0x6d1b3c1149dda943, 0x372c943a518c1093, + 0xad27af45e77c09c4, 0x3b6f92b646044604, + 0xac2917909f5fcf4f, 0x2069a60e977e5557, + 0x353a469e71014de5, 0x24be356281f55c15, + 0x2b6d710ba8e9adea, 0x404ad1751c749c29, + 0xed7311bf23d7f185, 0xba4f6976b4acc43e, + 0x32d7198d2bc39000, 0xee667019014d6e01, + 0x494ef3e128d14c83, 0x1f95a152baecd6be, + 0x201648dff1f483a5, 0x68c28550c8384af6, + 0x5fc834a6824a7f48, 0x7cd06cb7365eaf28, + 0xd82bbd95e9b30909, 0x234f0d1694c53f6d, + 0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e, + 0xf8f6b97f5585080a, 0x74236084be57b95b, + 0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b, + 0x4378ffe93e1528c5, 0x94ca92a17118e2d2, +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go new file mode 100644 index 000000000..b4cf8b75e --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go @@ -0,0 +1,14 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package hash provides rolling hashes. + +Rolling hashes have to be used for maintaining the positions of n-byte +sequences in the dictionary buffer. + +The package provides currently the Rabin-Karp rolling hash and a Cyclic +Polynomial hash. Both support the Hashes method to be used with an interface. +*/ +package hash diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go new file mode 100644 index 000000000..5322342ee --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go @@ -0,0 +1,66 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// A is the default constant for Robin-Karp rolling hash. This is a random +// prime. 
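Before the constant itself, the rolling property that doc.go above describes can be demonstrated with a small self-contained mirror of the Rabin-Karp roller defined below; it is reimplemented here because internal/hash is not importable from outside the module:

```go
package main

import "fmt"

// rabinKarp mirrors internal/hash/rabin_karp.go above.
type rabinKarp struct {
	a, aOldest, h uint64
	p             []byte
	i             int
}

func newRabinKarp(n int, a uint64) *rabinKarp {
	aOldest := uint64(1)
	for i := 0; i < n; i++ {
		aOldest *= a // a^n, used to drop the oldest byte
	}
	return &rabinKarp{a: a, aOldest: aOldest, p: make([]byte, 0, n)}
}

func (r *rabinKarp) rollByte(x byte) uint64 {
	if len(r.p) < cap(r.p) {
		r.h += uint64(x)
		r.h *= r.a
		r.p = append(r.p, x)
	} else {
		r.h -= uint64(r.p[r.i]) * r.aOldest
		r.h += uint64(x)
		r.h *= r.a
		r.p[r.i] = x
		r.i = (r.i + 1) % cap(r.p)
	}
	return r.h
}

func main() {
	const a = 0x97b548add41d5da1 // the package's default constant A
	data := []byte("abcdefgh")
	const n = 4

	// Roll the window across all of data.
	r := newRabinKarp(n, a)
	var rolled uint64
	for _, b := range data {
		rolled = r.rollByte(b)
	}

	// Hashing only the final window from scratch gives the same value:
	// the state depends only on the last Len() bytes, which is the
	// property the dictionary matcher relies on.
	f := newRabinKarp(n, a)
	var fresh uint64
	for _, b := range data[len(data)-n:] {
		fresh = f.rollByte(b)
	}
	fmt.Println(rolled == fresh) // true
}
```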
+const A = 0x97b548add41d5da1 + +// RabinKarp supports the computation of a rolling hash. +type RabinKarp struct { + A uint64 + // a^n + aOldest uint64 + h uint64 + p []byte + i int +} + +// NewRabinKarp creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The default constant will will be +// used. +func NewRabinKarp(n int) *RabinKarp { + return NewRabinKarpConst(n, A) +} + +// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The argument a provides the +// constant used to compute the hash. +func NewRabinKarpConst(n int, a uint64) *RabinKarp { + if n <= 0 { + panic("number of bytes n must be positive") + } + aOldest := uint64(1) + // There are faster methods. For the small n required by the LZMA + // compressor O(n) is sufficient. + for i := 0; i < n; i++ { + aOldest *= a + } + return &RabinKarp{ + A: a, aOldest: aOldest, + p: make([]byte, 0, n), + } +} + +// Len returns the length of the byte sequence. +func (r *RabinKarp) Len() int { + return cap(r.p) +} + +// RollByte computes the hash after x has been added. +func (r *RabinKarp) RollByte(x byte) uint64 { + if len(r.p) < cap(r.p) { + r.h += uint64(x) + r.h *= r.A + r.p = append(r.p, x) + } else { + r.h -= uint64(r.p[r.i]) * r.aOldest + r.h += uint64(x) + r.h *= r.A + r.p[r.i] = x + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go new file mode 100644 index 000000000..a98983356 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go @@ -0,0 +1,29 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// Roller provides an interface for rolling hashes. The hash value will become +// valid after hash has been called Len times. +type Roller interface { + Len() int + RollByte(x byte) uint64 +} + +// Hashes computes all hash values for the array p. Note that the state of the +// roller is changed. +func Hashes(r Roller, p []byte) []uint64 { + n := r.Len() + if len(p) < n { + return nil + } + h := make([]uint64, len(p)-n+1) + for i := 0; i < n-1; i++ { + r.RollByte(p[i]) + } + for i := range h { + h[i] = r.RollByte(p[i+n-1]) + } + return h +} diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go new file mode 100644 index 000000000..f4627ea11 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go @@ -0,0 +1,456 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xlog provides a simple logging package that allows to disable +// certain message categories. It defines a type, Logger, with multiple +// methods for formatting output. The package has also a predefined +// 'standard' Logger accessible through helper function Print[f|ln], +// Fatal[f|ln], Panic[f|ln], Warn[f|ln], Print[f|ln] and Debug[f|ln] +// that are easier to use then creating a Logger manually. That logger +// writes to standard error and prints the date and time of each logged +// message, which can be configured using the function SetFlags. +// +// The Fatal functions call os.Exit(1) after the message is output +// unless not suppressed by the flags. 
The Panic functions call panic +// after the writing the log message unless suppressed. +package xlog + +import ( + "fmt" + "io" + "os" + "runtime" + "sync" + "time" +) + +// The flags define what information is prefixed to each log entry +// generated by the Logger. The Lno* versions allow the suppression of +// specific output. The bits are or'ed together to control what will be +// printed. There is no control over the order of the items printed and +// the format. The full format is: +// +// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message +const ( + Ldate = 1 << iota // the date: 2009-01-23 + Ltime // the time: 01:23:23 + Lmicroseconds // microsecond resolution: 01:23:23.123123 + Llongfile // full file name and line number: /a/b/c/d.go:23 + Lshortfile // final file name element and line number: d.go:23 + Lnopanic // suppresses output from Panic[f|ln] but not the panic call + Lnofatal // suppresses output from Fatal[f|ln] but not the exit + Lnowarn // suppresses output from Warn[f|ln] + Lnoprint // suppresses output from Print[f|ln] + Lnodebug // suppresses output from Debug[f|ln] + // initial values for the standard logger + Lstdflags = Ldate | Ltime | Lnodebug +) + +// A Logger represents an active logging object that generates lines of +// output to an io.Writer. Each logging operation if not suppressed +// makes a single call to the Writer's Write method. A Logger can be +// used simultaneously from multiple goroutines; it guarantees to +// serialize access to the Writer. +type Logger struct { + mu sync.Mutex // ensures atomic writes; and protects the following + // fields + prefix string // prefix to write at beginning of each line + flag int // properties + out io.Writer // destination for output + buf []byte // for accumulating text to write +} + +// New creates a new Logger. The out argument sets the destination to +// which the log output will be written. The prefix appears at the +// beginning of each log line. The flag argument defines the logging +// properties. +func New(out io.Writer, prefix string, flag int) *Logger { + return &Logger{out: out, prefix: prefix, flag: flag} +} + +// std is the standard logger used by the package scope functions. +var std = New(os.Stderr, "", Lstdflags) + +// itoa converts the integer to ASCII. A negative widths will avoid +// zero-padding. The function supports only non-negative integers. +func itoa(buf *[]byte, i int, wid int) { + var u = uint(i) + if u == 0 && wid <= 1 { + *buf = append(*buf, '0') + return + } + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + *buf = append(*buf, b[bp:]...) +} + +// formatHeader puts the header into the buf field of the buffer. +func (l *Logger) formatHeader(t time.Time, file string, line int) { + l.buf = append(l.buf, l.prefix...) 
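+	// Editor's note: the fields below are appended in a fixed order as
+	// requested by the flags: date, time (with optional microseconds),
+	// then file name and line number.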
+ if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { + if l.flag&Ldate != 0 { + year, month, day := t.Date() + itoa(&l.buf, year, 4) + l.buf = append(l.buf, '-') + itoa(&l.buf, int(month), 2) + l.buf = append(l.buf, '-') + itoa(&l.buf, day, 2) + l.buf = append(l.buf, ' ') + } + if l.flag&(Ltime|Lmicroseconds) != 0 { + hour, min, sec := t.Clock() + itoa(&l.buf, hour, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, min, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, sec, 2) + if l.flag&Lmicroseconds != 0 { + l.buf = append(l.buf, '.') + itoa(&l.buf, t.Nanosecond()/1e3, 6) + } + l.buf = append(l.buf, ' ') + } + } + if l.flag&(Lshortfile|Llongfile) != 0 { + if l.flag&Lshortfile != 0 { + short := file + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + short = file[i+1:] + break + } + } + file = short + } + l.buf = append(l.buf, file...) + l.buf = append(l.buf, ':') + itoa(&l.buf, line, -1) + l.buf = append(l.buf, ": "...) + } +} + +func (l *Logger) output(calldepth int, now time.Time, s string) error { + var file string + var line int + if l.flag&(Lshortfile|Llongfile) != 0 { + l.mu.Unlock() + var ok bool + _, file, line, ok = runtime.Caller(calldepth) + if !ok { + file = "???" + line = 0 + } + l.mu.Lock() + } + l.buf = l.buf[:0] + l.formatHeader(now, file, line) + l.buf = append(l.buf, s...) + if len(s) == 0 || s[len(s)-1] != '\n' { + l.buf = append(l.buf, '\n') + } + _, err := l.out.Write(l.buf) + return err +} + +// Output writes the string s with the header controlled by the flags to +// the l.out writer. A newline will be appended if s doesn't end in a +// newline. Calldepth is used to recover the PC, although all current +// calls of Output use the call depth 2. Access to the function is serialized. +func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprint(v...) + return l.output(calldepth+1, now, s) +} + +// Outputf works like output but formats the output like Printf. +func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintf(format, v...) + return l.output(calldepth+1, now, s) +} + +// Outputln works like output but formats the output like Println. +func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintln(v...) + return l.output(calldepth+1, now, s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panic(v ...interface{}) { + l.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panic(v ...interface{}) { + std.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicf(format string, v ...interface{}) { + l.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicf(format string, v ...interface{}) { + std.Outputf(2, Lnopanic, format, v...) 
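+	// Editor's note: the message is formatted a second time for the panic
+	// value, so the panic carries the text even if Lnopanic suppressed the
+	// log output above.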
+ s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicln(v ...interface{}) { + l.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicln(v ...interface{}) { + std.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatal(v ...interface{}) { + l.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatal(v ...interface{}) { + std.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalf(format string, v ...interface{}) { + l.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalf(format string, v ...interface{}) { + std.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalln(format string, v ...interface{}) { + l.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalln(format string, v ...interface{}) { + std.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warn(v ...interface{}) { + l.Output(2, Lnowarn, v...) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func Warn(v ...interface{}) { + std.Output(2, Lnowarn, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnf(format string, v ...interface{}) { + l.Outputf(2, Lnowarn, format, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func Warnf(format string, v ...interface{}) { + std.Outputf(2, Lnowarn, format, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnln(v ...interface{}) { + l.Outputln(2, Lnowarn, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func Warnln(v ...interface{}) { + std.Outputln(2, Lnowarn, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Print(v ...interface{}) { + l.Output(2, Lnoprint, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func Print(v ...interface{}) { + std.Output(2, Lnoprint, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Printf(format string, v ...interface{}) { + l.Outputf(2, Lnoprint, format, v...) 
+} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func Printf(format string, v ...interface{}) { + std.Outputf(2, Lnoprint, format, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func (l *Logger) Println(v ...interface{}) { + l.Outputln(2, Lnoprint, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func Println(v ...interface{}) { + std.Outputln(2, Lnoprint, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debug(v ...interface{}) { + l.Output(2, Lnodebug, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func Debug(v ...interface{}) { + std.Output(2, Lnodebug, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugf(format string, v ...interface{}) { + l.Outputf(2, Lnodebug, format, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func Debugf(format string, v ...interface{}) { + std.Outputf(2, Lnodebug, format, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugln(v ...interface{}) { + l.Outputln(2, Lnodebug, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func Debugln(v ...interface{}) { + std.Outputln(2, Lnodebug, v...) +} + +// Flags returns the current flags used by the logger. +func (l *Logger) Flags() int { + l.mu.Lock() + defer l.mu.Unlock() + return l.flag +} + +// Flags returns the current flags used by the standard logger. +func Flags() int { + return std.Flags() +} + +// SetFlags sets the flags of the logger. +func (l *Logger) SetFlags(flag int) { + l.mu.Lock() + defer l.mu.Unlock() + l.flag = flag +} + +// SetFlags sets the flags for the standard logger. +func SetFlags(flag int) { + std.SetFlags(flag) +} + +// Prefix returns the prefix used by the logger. +func (l *Logger) Prefix() string { + l.mu.Lock() + defer l.mu.Unlock() + return l.prefix +} + +// Prefix returns the prefix used by the standard logger of the package. +func Prefix() string { + return std.Prefix() +} + +// SetPrefix sets the prefix for the logger. +func (l *Logger) SetPrefix(prefix string) { + l.mu.Lock() + defer l.mu.Unlock() + l.prefix = prefix +} + +// SetPrefix sets the prefix of the standard logger of the package. +func SetPrefix(prefix string) { + std.SetPrefix(prefix) +} + +// SetOutput sets the output of the logger. +func (l *Logger) SetOutput(w io.Writer) { + l.mu.Lock() + defer l.mu.Unlock() + l.out = w +} + +// SetOutput sets the output for the standard logger of the package. +func SetOutput(w io.Writer) { + std.SetOutput(w) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go new file mode 100644 index 000000000..2b39da6f7 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -0,0 +1,522 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "unicode" +) + +// node represents a node in the binary tree. 
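+//
+// Editor's note (not upstream): nodes live in a ring buffer and refer to one
+// another by buffer index instead of by pointer; the index null (1<<32 - 1)
+// marks a missing parent or child link.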
+type node struct { + // x is the search value + x uint32 + // p parent node + p uint32 + // l left child + l uint32 + // r right child + r uint32 +} + +// wordLen is the number of bytes represented by the v field of a node. +const wordLen = 4 + +// binTree supports the identification of the next operation based on a +// binary tree. +// +// Nodes will be identified by their index into the ring buffer. +type binTree struct { + dict *encoderDict + // ring buffer of nodes + node []node + // absolute offset of the entry for the next node. Position 4 + // byte larger. + hoff int64 + // front position in the node ring buffer + front uint32 + // index of the root node + root uint32 + // current x value + x uint32 + // preallocated array + data []byte +} + +// null represents the nonexistent index. We can't use zero because it +// would always exist or we would need to decrease the index for each +// reference. +const null uint32 = 1<<32 - 1 + +// newBinTree initializes the binTree structure. The capacity defines +// the size of the buffer and defines the maximum distance for which +// matches will be found. +func newBinTree(capacity int) (t *binTree, err error) { + if capacity < 1 { + return nil, errors.New( + "newBinTree: capacity must be larger than zero") + } + if int64(capacity) >= int64(null) { + return nil, errors.New( + "newBinTree: capacity must less 2^{32}-1") + } + t = &binTree{ + node: make([]node, capacity), + hoff: -int64(wordLen), + root: null, + data: make([]byte, maxMatchLen), + } + return t, nil +} + +func (t *binTree) SetDict(d *encoderDict) { t.dict = d } + +// WriteByte writes a single byte into the binary tree. +func (t *binTree) WriteByte(c byte) error { + t.x = (t.x << 8) | uint32(c) + t.hoff++ + if t.hoff < 0 { + return nil + } + v := t.front + if int64(v) < t.hoff { + // We are overwriting old nodes stored in the tree. + t.remove(v) + } + t.node[v].x = t.x + t.add(v) + t.front++ + if int64(t.front) >= int64(len(t.node)) { + t.front = 0 + } + return nil +} + +// Writes writes a sequence of bytes into the binTree structure. +func (t *binTree) Write(p []byte) (n int, err error) { + for _, c := range p { + t.WriteByte(c) + } + return len(p), nil +} + +// add puts the node v into the tree. The node must not be part of the +// tree before. +func (t *binTree) add(v uint32) { + vn := &t.node[v] + // Set left and right to null indices. + vn.l, vn.r = null, null + // If the binary tree is empty make v the root. + if t.root == null { + t.root = v + vn.p = null + return + } + x := vn.x + p := t.root + // Search for the right leave link and add the new node. + for { + pn := &t.node[p] + if x <= pn.x { + if pn.l == null { + pn.l = v + vn.p = p + return + } + p = pn.l + } else { + if pn.r == null { + pn.r = v + vn.p = p + return + } + p = pn.r + } + } +} + +// parent returns the parent node index of v and the pointer to v value +// in the parent. +func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) { + if t.root == v { + return null, &t.root + } + p = t.node[v].p + if t.node[p].l == v { + ptr = &t.node[p].l + } else { + ptr = &t.node[p].r + } + return +} + +// Remove node v. +func (t *binTree) remove(v uint32) { + vn := &t.node[v] + p, ptr := t.parent(v) + l, r := vn.l, vn.r + if l == null { + // Move the right child up. + *ptr = r + if r != null { + t.node[r].p = p + } + return + } + if r == null { + // Move the left child up. + *ptr = l + t.node[l].p = p + return + } + + // Search the in-order predecessor u. 
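+	// Editor's note: classic deletion of a node with two children: v is
+	// replaced by the largest node u of its left subtree, which preserves
+	// the ordering and leaves u with at most one child to splice out.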
+ un := &t.node[l] + ur := un.r + if ur == null { + // In order predecessor is l. Move it up. + un.r = r + t.node[r].p = l + un.p = p + *ptr = l + return + } + var u uint32 + for { + // Look for the max value in the tree where l is root. + u = ur + ur = t.node[u].r + if ur == null { + break + } + } + // replace u with ul + un = &t.node[u] + ul := un.l + up := un.p + t.node[up].r = ul + if ul != null { + t.node[ul].p = up + } + + // replace v by u + un.l, un.r = l, r + t.node[l].p = u + t.node[r].p = u + *ptr = u + un.p = p +} + +// search looks for the node that have the value x or for the nodes that +// brace it. The node highest in the tree with the value x will be +// returned. All other nodes with the same value live in left subtree of +// the returned node. +func (t *binTree) search(v uint32, x uint32) (a, b uint32) { + a, b = null, null + if v == null { + return + } + for { + vn := &t.node[v] + if x <= vn.x { + if x == vn.x { + return v, v + } + b = v + if vn.l == null { + return + } + v = vn.l + } else { + a = v + if vn.r == null { + return + } + v = vn.r + } + } +} + +// max returns the node with maximum value in the subtree with v as +// root. +func (t *binTree) max(v uint32) uint32 { + if v == null { + return null + } + for { + r := t.node[v].r + if r == null { + return v + } + v = r + } +} + +// min returns the node with the minimum value in the subtree with v as +// root. +func (t *binTree) min(v uint32) uint32 { + if v == null { + return null + } + for { + l := t.node[v].l + if l == null { + return v + } + v = l + } +} + +// pred returns the in-order predecessor of node v. +func (t *binTree) pred(v uint32) uint32 { + if v == null { + return null + } + u := t.max(t.node[v].l) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].r == v { + return p + } + v = p + } +} + +// succ returns the in-order successor of node v. +func (t *binTree) succ(v uint32) uint32 { + if v == null { + return null + } + u := t.min(t.node[v].r) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].l == v { + return p + } + v = p + } +} + +// xval converts the first four bytes of a into an 32-bit unsigned +// integer in big-endian order. +func xval(a []byte) uint32 { + var x uint32 + switch len(a) { + default: + x |= uint32(a[3]) + fallthrough + case 3: + x |= uint32(a[2]) << 8 + fallthrough + case 2: + x |= uint32(a[1]) << 16 + fallthrough + case 1: + x |= uint32(a[0]) << 24 + case 0: + } + return x +} + +// dumpX converts value x into a four-letter string. +func dumpX(x uint32) string { + a := make([]byte, 4) + for i := 0; i < 4; i++ { + c := byte(x >> uint((3-i)*8)) + if unicode.IsGraphic(rune(c)) { + a[i] = c + } else { + a[i] = '.' + } + } + return string(a) +} + +/* +// dumpNode writes a representation of the node v into the io.Writer. +func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { + if v == null { + return + } + + vn := &t.node[v] + + t.dumpNode(w, vn.r, indent+2) + + for i := 0; i < indent; i++ { + fmt.Fprint(w, " ") + } + if vn.p == null { + fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x)) + } else { + fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p) + } + + t.dumpNode(w, vn.l, indent+2) +} + +// dump prints a representation of the binary tree into the writer. 
+func (t *binTree) dump(w io.Writer) error { + bw := bufio.NewWriter(w) + t.dumpNode(bw, t.root, 0) + return bw.Flush() +} +*/ + +func (t *binTree) distance(v uint32) int { + dist := int(t.front) - int(v) + if dist <= 0 { + dist += len(t.node) + } + return dist +} + +type matchParams struct { + rep [4]uint32 + // length when match will be accepted + nAccept int + // nodes to check + check int + // finish if length get shorter + stopShorter bool +} + +func (t *binTree) match(m match, distIter func() (int, bool), p matchParams, +) (r match, checked int, accepted bool) { + buf := &t.dict.buf + for { + if checked >= p.check { + return m, checked, true + } + dist, ok := distIter() + if !ok { + return m, checked, false + } + checked++ + if m.n > 0 { + i := buf.rear - dist + m.n - 1 + if i < 0 { + i += len(buf.data) + } else if i >= len(buf.data) { + i -= len(buf.data) + } + if buf.data[i] != t.data[m.n-1] { + if p.stopShorter { + return m, checked, false + } + continue + } + } + n := buf.matchLen(dist, t.data) + switch n { + case 0: + if p.stopShorter { + return m, checked, false + } + continue + case 1: + if uint32(dist-minDistance) != p.rep[0] { + continue + } + } + if n < m.n || (n == m.n && int64(dist) >= m.distance) { + continue + } + m = match{int64(dist), n} + if n >= p.nAccept { + return m, checked, true + } + } +} + +func (t *binTree) NextOp(rep [4]uint32) operation { + // retrieve maxMatchLen data + n, _ := t.dict.buf.Peek(t.data[:maxMatchLen]) + if n == 0 { + panic("no data in buffer") + } + t.data = t.data[:n] + + var ( + m match + x, u, v uint32 + iterPred, iterSucc func() (int, bool) + ) + p := matchParams{ + rep: rep, + nAccept: maxMatchLen, + check: 32, + } + i := 4 + iterSmall := func() (dist int, ok bool) { + i-- + if i <= 0 { + return 0, false + } + return i, true + } + m, checked, accepted := t.match(m, iterSmall, p) + if accepted { + goto end + } + p.check -= checked + x = xval(t.data) + u, v = t.search(t.root, x) + if u == v && len(t.data) == 4 { + iter := func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u, v = t.search(t.node[u].l, x) + if u != v { + u = null + } + return dist, true + } + m, _, _ = t.match(m, iter, p) + goto end + } + p.stopShorter = true + iterSucc = func() (dist int, ok bool) { + if v == null { + return 0, false + } + dist = t.distance(v) + v = t.succ(v) + return dist, true + } + m, checked, accepted = t.match(m, iterSucc, p) + if accepted { + goto end + } + p.check -= checked + iterPred = func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u = t.pred(u) + return dist, true + } + m, _, _ = t.match(m, iterPred, p) +end: + if m.n == 0 { + return lit{t.data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go new file mode 100644 index 000000000..201091709 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -0,0 +1,47 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +/* Naming conventions follows the CodeReviewComments in the Go Wiki. */ + +// ntz32Const is used by the functions NTZ and NLZ. +const ntz32Const = 0x04d7651f + +// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé. +// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26. 
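+//
+// Editor's note (illustrative): x & -x isolates the lowest set bit of x;
+// multiplying by ntz32Const moves a pattern unique to that bit position into
+// the top five bits, and x>>27 uses it as the table index. nlz32 below first
+// smears the highest set bit rightward so the same mechanism can count
+// leading zeros.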
+var ntz32Table = [32]int8{ + 0, 1, 2, 24, 3, 19, 6, 25, + 22, 4, 20, 10, 16, 7, 12, 26, + 31, 23, 18, 5, 21, 9, 15, 11, + 30, 17, 8, 14, 29, 13, 28, 27, +} + +/* +// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer. +func ntz32(x uint32) int { + if x == 0 { + return 32 + } + x = (x & -x) * ntz32Const + return int(ntz32Table[x>>27]) +} +*/ + +// nlz32 computes the number of leading zeros for an unsigned 32-bit integer. +func nlz32(x uint32) int { + // Smear left most bit to the right + x |= x >> 1 + x |= x >> 2 + x |= x >> 4 + x |= x >> 8 + x |= x >> 16 + // Use ntz mechanism to calculate nlz. + x++ + if x == 0 { + return 0 + } + x *= ntz32Const + return 32 - int(ntz32Table[x>>27]) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go new file mode 100644 index 000000000..9dfdf28b2 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go @@ -0,0 +1,39 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// breader provides the ReadByte function for a Reader. It doesn't read +// more data from the reader than absolutely necessary. +type breader struct { + io.Reader + // helper slice to save allocations + p []byte +} + +// ByteReader converts an io.Reader into an io.ByteReader. +func ByteReader(r io.Reader) io.ByteReader { + br, ok := r.(io.ByteReader) + if !ok { + return &breader{r, make([]byte, 1)} + } + return br +} + +// ReadByte read byte function. +func (r *breader) ReadByte() (c byte, err error) { + n, err := r.Reader.Read(r.p) + if n < 1 { + if err == nil { + err = errors.New("breader.ReadByte: no data") + } + return 0, err + } + return r.p[0], nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go new file mode 100644 index 000000000..af41d5b2d --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go @@ -0,0 +1,171 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" +) + +// buffer provides a circular buffer of bytes. If the front index equals +// the rear index the buffer is empty. As a consequence front cannot be +// equal rear for a full buffer. So a full buffer has a length that is +// one byte less the the length of the data slice. +type buffer struct { + data []byte + front int + rear int +} + +// newBuffer creates a buffer with the given size. +func newBuffer(size int) *buffer { + return &buffer{data: make([]byte, size+1)} +} + +// Cap returns the capacity of the buffer. +func (b *buffer) Cap() int { + return len(b.data) - 1 +} + +// Resets the buffer. The front and rear index are set to zero. +func (b *buffer) Reset() { + b.front = 0 + b.rear = 0 +} + +// Buffered returns the number of bytes buffered. +func (b *buffer) Buffered() int { + delta := b.front - b.rear + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// Available returns the number of bytes available for writing. +func (b *buffer) Available() int { + delta := b.rear - 1 - b.front + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// addIndex adds a non-negative integer to the index i and returns the +// resulting index. 
The function takes care of wrapping the index as +// well as potential overflow situations. +func (b *buffer) addIndex(i int, n int) int { + // subtraction of len(b.data) prevents overflow + i += n - len(b.data) + if i < 0 { + i += len(b.data) + } + return i +} + +// Read reads bytes from the buffer into p and returns the number of +// bytes read. The function never returns an error but might return less +// data than requested. +func (b *buffer) Read(p []byte) (n int, err error) { + n, err = b.Peek(p) + b.rear = b.addIndex(b.rear, n) + return n, err +} + +// Peek reads bytes from the buffer into p without changing the buffer. +// Peek will never return an error but might return less data than +// requested. +func (b *buffer) Peek(p []byte) (n int, err error) { + m := b.Buffered() + n = len(p) + if m < n { + n = m + p = p[:n] + } + k := copy(p, b.data[b.rear:]) + if k < n { + copy(p[k:], b.data) + } + return n, nil +} + +// Discard skips the n next bytes to read from the buffer, returning the +// bytes discarded. +// +// If Discards skips fewer than n bytes, it returns an error. +func (b *buffer) Discard(n int) (discarded int, err error) { + if n < 0 { + return 0, errors.New("buffer.Discard: negative argument") + } + m := b.Buffered() + if m < n { + n = m + err = errors.New( + "buffer.Discard: discarded less bytes then requested") + } + b.rear = b.addIndex(b.rear, n) + return n, err +} + +// ErrNoSpace indicates that there is insufficient space for the Write +// operation. +var ErrNoSpace = errors.New("insufficient space") + +// Write puts data into the buffer. If less bytes are written than +// requested ErrNoSpace is returned. +func (b *buffer) Write(p []byte) (n int, err error) { + m := b.Available() + n = len(p) + if m < n { + n = m + p = p[:m] + err = ErrNoSpace + } + k := copy(b.data[b.front:], p) + if k < n { + copy(b.data, p[k:]) + } + b.front = b.addIndex(b.front, n) + return n, err +} + +// WriteByte writes a single byte into the buffer. The error ErrNoSpace +// is returned if no single byte is available in the buffer for writing. +func (b *buffer) WriteByte(c byte) error { + if b.Available() < 1 { + return ErrNoSpace + } + b.data[b.front] = c + b.front = b.addIndex(b.front, 1) + return nil +} + +// prefixLen returns the length of the common prefix of a and b. +func prefixLen(a, b []byte) int { + if len(a) > len(b) { + a, b = b, a + } + for i, c := range a { + if b[i] != c { + return i + } + } + return len(a) +} + +// matchLen returns the length of the common prefix for the given +// distance from the rear and the byte slice p. +func (b *buffer) matchLen(distance int, p []byte) int { + var n int + i := b.rear - distance + if i < 0 { + if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i { + return n + } + p = p[n:] + i = 0 + } + n += prefixLen(p, b.data[i:]) + return n +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go new file mode 100644 index 000000000..f27e31a4a --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go @@ -0,0 +1,37 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// ErrLimit indicates that the limit of the LimitedByteWriter has been +// reached. +var ErrLimit = errors.New("limit reached") + +// LimitedByteWriter provides a byte writer that can be written until a +// limit is reached. 
The field N provides the number of remaining +// bytes. +type LimitedByteWriter struct { + BW io.ByteWriter + N int64 +} + +// WriteByte writes a single byte to the limited byte writer. It returns +// ErrLimit if the limit has been reached. If the byte is successfully +// written the field N of the LimitedByteWriter will be decremented by +// one. +func (l *LimitedByteWriter) WriteByte(c byte) error { + if l.N <= 0 { + return ErrLimit + } + if err := l.BW.WriteByte(c); err != nil { + return err + } + l.N-- + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go new file mode 100644 index 000000000..3765484e6 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -0,0 +1,277 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// decoder decodes a raw LZMA stream without any header. +type decoder struct { + // dictionary; the rear pointer of the buffer will be used for + // reading the data. + Dict *decoderDict + // decoder state + State *state + // range decoder + rd *rangeDecoder + // start stores the head value of the dictionary for the LZMA + // stream + start int64 + // size of uncompressed data + size int64 + // end-of-stream encountered + eos bool + // EOS marker found + eosMarker bool +} + +// newDecoder creates a new decoder instance. The parameter size provides +// the expected byte size of the decompressed data. If the size is +// unknown use a negative value. In that case the decoder will look for +// a terminating end-of-stream marker. +func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) { + rd, err := newRangeDecoder(br) + if err != nil { + return nil, err + } + d = &decoder{ + State: state, + Dict: dict, + rd: rd, + size: size, + start: dict.pos(), + } + return d, nil +} + +// Reopen restarts the decoder with a new byte reader and a new size. Reopen +// resets the Decompressed counter to zero. +func (d *decoder) Reopen(br io.ByteReader, size int64) error { + var err error + if d.rd, err = newRangeDecoder(br); err != nil { + return err + } + d.start = d.Dict.pos() + d.size = size + d.eos = false + return nil +} + +// decodeLiteral decodes a single literal from the LZMA stream. +func (d *decoder) decodeLiteral() (op operation, err error) { + litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head) + match := d.Dict.byteAt(int(d.State.rep[0]) + 1) + s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState) + if err != nil { + return nil, err + } + return lit{s}, nil +} + +// errEOS indicates that an EOS marker has been found. +var errEOS = errors.New("EOS marker found") + +// readOp decodes the next operation from the compressed stream. It +// returns the operation. If an explicit end of stream marker is +// identified the eos error is returned. 
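+//
+// Editor's note (summary, not upstream): the first decoded bit (isMatch)
+// selects between a literal and a match; for matches the isRep bit selects
+// between a simple match, whose length and distance are coded explicitly,
+// and a repeated match that reuses one of the four most recent distances
+// kept in rep[0..3].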
+func (d *decoder) readOp() (op operation, err error) { + // Value of the end of stream (EOS) marker + const eosDist = 1<<32 - 1 + + state, state2, posState := d.State.states(d.Dict.head) + + b, err := d.State.isMatch[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // literal + op, err := d.decodeLiteral() + if err != nil { + return nil, err + } + d.State.updateStateLiteral() + return op, nil + } + b, err = d.State.isRep[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // simple match + d.State.rep[3], d.State.rep[2], d.State.rep[1] = + d.State.rep[2], d.State.rep[1], d.State.rep[0] + + d.State.updateStateMatch() + // The length decoder returns the length offset. + n, err := d.State.lenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + // The dist decoder returns the distance offset. The actual + // distance is 1 higher. + d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n) + if err != nil { + return nil, err + } + if d.State.rep[0] == eosDist { + d.eosMarker = true + return nil, errEOS + } + op = match{n: int(n) + minMatchLen, + distance: int64(d.State.rep[0]) + minDistance} + return op, nil + } + b, err = d.State.isRepG0[state].Decode(d.rd) + if err != nil { + return nil, err + } + dist := d.State.rep[0] + if b == 0 { + // rep match 0 + b, err = d.State.isRepG0Long[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + d.State.updateStateShortRep() + op = match{n: 1, distance: int64(dist) + minDistance} + return op, nil + } + } else { + b, err = d.State.isRepG1[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[1] + } else { + b, err = d.State.isRepG2[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[2] + } else { + dist = d.State.rep[3] + d.State.rep[3] = d.State.rep[2] + } + d.State.rep[2] = d.State.rep[1] + } + d.State.rep[1] = d.State.rep[0] + d.State.rep[0] = dist + } + n, err := d.State.repLenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + d.State.updateStateRep() + op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance} + return op, nil +} + +// apply takes the operation and transforms the decoder dictionary accordingly. +func (d *decoder) apply(op operation) error { + var err error + switch x := op.(type) { + case match: + err = d.Dict.writeMatch(x.distance, x.n) + case lit: + err = d.Dict.WriteByte(x.b) + default: + panic("op is neither a match nor a literal") + } + return err +} + +// decompress fills the dictionary unless no space for new data is +// available. If the end of the LZMA stream has been reached io.EOF will +// be returned. 
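+//
+// Editor's note: the loop below decodes only while at least maxMatchLen
+// bytes of dictionary space remain, so every single operation (whose length
+// never exceeds maxMatchLen) is guaranteed to fit without a partial write.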
+func (d *decoder) decompress() error { + if d.eos { + return io.EOF + } + for d.Dict.Available() >= maxMatchLen { + op, err := d.readOp() + switch err { + case nil: + // break + case errEOS: + d.eos = true + if !d.rd.possiblyAtEnd() { + return errDataAfterEOS + } + if d.size >= 0 && d.size != d.Decompressed() { + return errSize + } + return io.EOF + case io.EOF: + d.eos = true + return io.ErrUnexpectedEOF + default: + return err + } + if err = d.apply(op); err != nil { + return err + } + if d.size >= 0 && d.Decompressed() >= d.size { + d.eos = true + if d.Decompressed() > d.size { + return errSize + } + if !d.rd.possiblyAtEnd() { + switch _, err = d.readOp(); err { + case nil: + return errSize + case io.EOF: + return io.ErrUnexpectedEOF + case errEOS: + break + default: + return err + } + } + return io.EOF + } + } + return nil +} + +// Errors that may be returned while decoding data. +var ( + errDataAfterEOS = errors.New("lzma: data after end of stream marker") + errSize = errors.New("lzma: wrong uncompressed data size") +) + +// Read reads data from the buffer. If no more data is available io.EOF is +// returned. +func (d *decoder) Read(p []byte) (n int, err error) { + var k int + for { + // Read of decoder dict never returns an error. + k, err = d.Dict.Read(p[n:]) + if err != nil { + panic(fmt.Errorf("dictionary read error %s", err)) + } + if k == 0 && d.eos { + return n, io.EOF + } + n += k + if n >= len(p) { + return n, nil + } + if err = d.decompress(); err != nil && err != io.EOF { + return n, err + } + } +} + +// Decompressed returns the number of bytes decompressed by the decoder. +func (d *decoder) Decompressed() int64 { + return d.Dict.pos() - d.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go new file mode 100644 index 000000000..d5b814f0a --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -0,0 +1,128 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// decoderDict provides the dictionary for the decoder. The whole +// dictionary is used as reader buffer. +type decoderDict struct { + buf buffer + head int64 +} + +// newDecoderDict creates a new decoder dictionary. The whole dictionary +// will be used as reader buffer. +func newDecoderDict(dictCap int) (d *decoderDict, err error) { + // lower limit supports easy test cases + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New("lzma: dictCap out of range") + } + d = &decoderDict{buf: *newBuffer(dictCap)} + return d, nil +} + +// Reset clears the dictionary. The read buffer is not changed, so the +// buffered data can still be read. +func (d *decoderDict) Reset() { + d.head = 0 +} + +// WriteByte writes a single byte into the dictionary. It is used to +// write literals into the dictionary. +func (d *decoderDict) WriteByte(c byte) error { + if err := d.buf.WriteByte(c); err != nil { + return err + } + d.head++ + return nil +} + +// pos returns the position of the dictionary head. +func (d *decoderDict) pos() int64 { return d.head } + +// dictLen returns the actual length of the dictionary. +func (d *decoderDict) dictLen() int { + capacity := d.buf.Cap() + if d.head >= int64(capacity) { + return capacity + } + return int(d.head) +} + +// byteAt returns a byte stored in the dictionary. 
If the distance is +// non-positive or exceeds the current length of the dictionary the zero +// byte is returned. +func (d *decoderDict) byteAt(dist int) byte { + if !(0 < dist && dist <= d.dictLen()) { + return 0 + } + i := d.buf.front - dist + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// writeMatch writes the match at the top of the dictionary. The given +// distance must point in the current dictionary and the length must not +// exceed the maximum length 273 supported in LZMA. +// +// The error value ErrNoSpace indicates that no space is available in +// the dictionary for writing. You need to read from the dictionary +// first. +func (d *decoderDict) writeMatch(dist int64, length int) error { + if !(0 < dist && dist <= int64(d.dictLen())) { + return errors.New("writeMatch: distance out of range") + } + if !(0 < length && length <= maxMatchLen) { + return errors.New("writeMatch: length out of range") + } + if length > d.buf.Available() { + return ErrNoSpace + } + d.head += int64(length) + + i := d.buf.front - int(dist) + if i < 0 { + i += len(d.buf.data) + } + for length > 0 { + var p []byte + if i >= d.buf.front { + p = d.buf.data[i:] + i = 0 + } else { + p = d.buf.data[i:d.buf.front] + i = d.buf.front + } + if len(p) > length { + p = p[:length] + } + if _, err := d.buf.Write(p); err != nil { + panic(fmt.Errorf("d.buf.Write returned error %s", err)) + } + length -= len(p) + } + return nil +} + +// Write writes the given bytes into the dictionary and advances the +// head. +func (d *decoderDict) Write(p []byte) (n int, err error) { + n, err = d.buf.Write(p) + d.head += int64(n) + return n, err +} + +// Available returns the number of available bytes for writing into the +// decoder dictionary. +func (d *decoderDict) Available() int { return d.buf.Available() } + +// Read reads data from the buffer contained in the decoder dictionary. +func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go new file mode 100644 index 000000000..76b713106 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -0,0 +1,38 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// directCodec allows the encoding and decoding of values with a fixed number +// of bits. The number of bits must be in the range [1,32]. +type directCodec byte + +// Bits returns the number of bits supported by this codec. +func (dc directCodec) Bits() int { + return int(dc) +} + +// Encode uses the range encoder to encode a value with the fixed number of +// bits. The most-significant bit is encoded first. +func (dc directCodec) Encode(e *rangeEncoder, v uint32) error { + for i := int(dc) - 1; i >= 0; i-- { + if err := e.DirectEncodeBit(v >> uint(i)); err != nil { + return err + } + } + return nil +} + +// Decode uses the range decoder to decode a value with the given number of +// given bits. The most-significant bit is decoded first. 
+func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) { + for i := int(dc) - 1; i >= 0; i-- { + x, err := d.DirectDecodeBit() + if err != nil { + return 0, err + } + v = (v << 1) | x + } + return v, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go new file mode 100644 index 000000000..b447d8ec4 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -0,0 +1,140 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// Constants used by the distance codec. +const ( + // minimum supported distance + minDistance = 1 + // maximum supported distance, value is used for the eos marker. + maxDistance = 1 << 32 + // number of the supported len states + lenStates = 4 + // start for the position models + startPosModel = 4 + // first index with align bits support + endPosModel = 14 + // bits for the position slots + posSlotBits = 6 + // number of align bits + alignBits = 4 +) + +// distCodec provides encoding and decoding of distance values. +type distCodec struct { + posSlotCodecs [lenStates]treeCodec + posModel [endPosModel - startPosModel]treeReverseCodec + alignCodec treeReverseCodec +} + +// deepcopy initializes dc as deep copy of the source. +func (dc *distCodec) deepcopy(src *distCodec) { + if dc == src { + return + } + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i]) + } + for i := range dc.posModel { + dc.posModel[i].deepcopy(&src.posModel[i]) + } + dc.alignCodec.deepcopy(&src.alignCodec) +} + +// newDistCodec creates a new distance codec. +func (dc *distCodec) init() { + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits) + } + for i := range dc.posModel { + posSlot := startPosModel + i + bits := (posSlot >> 1) - 1 + dc.posModel[i] = makeTreeReverseCodec(bits) + } + dc.alignCodec = makeTreeReverseCodec(alignBits) +} + +// lenState converts the value l to a supported lenState value. +func lenState(l uint32) uint32 { + if l >= lenStates { + l = lenStates - 1 + } + return l +} + +// Encode encodes the distance using the parameter l. Dist can have values from +// the full range of uint32 values. To get the distance offset the actual match +// distance has to be decreased by 1. A distance offset of 0xffffffff (eos) +// indicates the end of the stream. +func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) { + // Compute the posSlot using nlz32 + var posSlot uint32 + var bits uint32 + if dist < startPosModel { + posSlot = dist + } else { + bits = uint32(30 - nlz32(dist)) + posSlot = startPosModel - 2 + (bits << 1) + posSlot += (dist >> uint(bits)) & 1 + } + + if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil { + return + } + + switch { + case posSlot < startPosModel: + return nil + case posSlot < endPosModel: + tc := &dc.posModel[posSlot-startPosModel] + return tc.Encode(dist, e) + } + dic := directCodec(bits - alignBits) + if err = dic.Encode(e, dist>>alignBits); err != nil { + return + } + return dc.alignCodec.Encode(dist, e) +} + +// Decode decodes the distance offset using the parameter l. The dist value +// 0xffffffff (eos) indicates the end of the stream. Add one to the distance +// offset to get the actual match distance. 
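+//
+// Editor's note (illustrative): a 6-bit position slot is decoded first;
+// slots 0-3 are the distance offset itself, slots 4-13 add bits from the
+// reverse tree codecs in posModel, and larger slots add direct bits plus
+// four reverse-coded align bits, mirroring the Encode path above.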
+func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) { + posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d) + if err != nil { + return + } + + // posSlot equals distance + if posSlot < startPosModel { + return posSlot, nil + } + + // posSlot uses the individual models + bits := (posSlot >> 1) - 1 + dist = (2 | (posSlot & 1)) << bits + var u uint32 + if posSlot < endPosModel { + tc := &dc.posModel[posSlot-startPosModel] + if u, err = tc.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil + } + + // posSlots use direct encoding and a single model for the four align + // bits. + dic := directCodec(bits - alignBits) + if u, err = dic.Decode(d); err != nil { + return 0, err + } + dist += u << alignBits + if u, err = dc.alignCodec.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go new file mode 100644 index 000000000..e40938318 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go @@ -0,0 +1,268 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "io" +) + +// opLenMargin provides the upper limit of the number of bytes required +// to encode a single operation. +const opLenMargin = 16 + +// compressFlags control the compression process. +type compressFlags uint32 + +// Values for compressFlags. +const ( + // all data should be compressed, even if compression is not + // optimal. + all compressFlags = 1 << iota +) + +// encoderFlags provide the flags for an encoder. +type encoderFlags uint32 + +// Flags for the encoder. +const ( + // eosMarker requests an EOS marker to be written. + eosMarker encoderFlags = 1 << iota +) + +// Encoder compresses data buffered in the encoder dictionary and writes +// it into a byte writer. +type encoder struct { + dict *encoderDict + state *state + re *rangeEncoder + start int64 + // generate eos marker + marker bool + limit bool + margin int +} + +// newEncoder creates a new encoder. If the byte writer must be +// limited use LimitedByteWriter provided by this package. The flags +// argument supports the eosMarker flag, controlling whether a +// terminating end-of-stream marker must be written. +func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict, + flags encoderFlags) (e *encoder, err error) { + + re, err := newRangeEncoder(bw) + if err != nil { + return nil, err + } + e = &encoder{ + dict: dict, + state: state, + re: re, + marker: flags&eosMarker != 0, + start: dict.Pos(), + margin: opLenMargin, + } + if e.marker { + e.margin += 5 + } + return e, nil +} + +// Write writes the bytes from p into the dictionary. If not enough +// space is available the data in the dictionary buffer will be +// compressed to make additional space available. If the limit of the +// underlying writer has been reached ErrLimit will be returned. +func (e *encoder) Write(p []byte) (n int, err error) { + for { + k, err := e.dict.Write(p[n:]) + n += k + if err == ErrNoSpace { + if err = e.compress(0); err != nil { + return n, err + } + continue + } + return n, err + } +} + +// Reopen reopens the encoder with a new byte writer. 
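+//
+// Editor's note: the dictionary and the probability state are kept and only
+// the range encoder is restarted, so one encoder can emit several chunks;
+// the margin reserved in newEncoder (opLenMargin, plus five bytes when an
+// EOS marker was requested) still bounds the space needed to close each one.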
+func (e *encoder) Reopen(bw io.ByteWriter) error { + var err error + if e.re, err = newRangeEncoder(bw); err != nil { + return err + } + e.start = e.dict.Pos() + e.limit = false + return nil +} + +// writeLiteral writes a literal into the LZMA stream +func (e *encoder) writeLiteral(l lit) error { + var err error + state, state2, _ := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil { + return err + } + litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos()) + match := e.dict.ByteAt(int(e.state.rep[0]) + 1) + err = e.state.litCodec.Encode(e.re, l.b, state, match, litState) + if err != nil { + return err + } + e.state.updateStateLiteral() + return nil +} + +// iverson implements the Iverson operator as proposed by Donald Knuth in his +// book Concrete Mathematics. +func iverson(ok bool) uint32 { + if ok { + return 1 + } + return 0 +} + +// writeMatch writes a repetition operation into the operation stream +func (e *encoder) writeMatch(m match) error { + var err error + if !(minDistance <= m.distance && m.distance <= maxDistance) { + panic(fmt.Errorf("match distance %d out of range", m.distance)) + } + dist := uint32(m.distance - minDistance) + if !(minMatchLen <= m.n && m.n <= maxMatchLen) && + !(dist == e.state.rep[0] && m.n == 1) { + panic(fmt.Errorf( + "match length %d out of range; dist %d rep[0] %d", + m.n, dist, e.state.rep[0])) + } + state, state2, posState := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil { + return err + } + g := 0 + for ; g < 4; g++ { + if e.state.rep[g] == dist { + break + } + } + b := iverson(g < 4) + if err = e.state.isRep[state].Encode(e.re, b); err != nil { + return err + } + n := uint32(m.n - minMatchLen) + if b == 0 { + // simple match + e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] = + e.state.rep[2], e.state.rep[1], e.state.rep[0], dist + e.state.updateStateMatch() + if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil { + return err + } + return e.state.distCodec.Encode(e.re, dist, n) + } + b = iverson(g != 0) + if err = e.state.isRepG0[state].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + // g == 0 + b = iverson(m.n != 1) + if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + e.state.updateStateShortRep() + return nil + } + } else { + // g in {1,2,3} + b = iverson(g != 1) + if err = e.state.isRepG1[state].Encode(e.re, b); err != nil { + return err + } + if b == 1 { + // g in {2,3} + b = iverson(g != 2) + err = e.state.isRepG2[state].Encode(e.re, b) + if err != nil { + return err + } + if b == 1 { + e.state.rep[3] = e.state.rep[2] + } + e.state.rep[2] = e.state.rep[1] + } + e.state.rep[1] = e.state.rep[0] + e.state.rep[0] = dist + } + e.state.updateStateRep() + return e.state.repLenCodec.Encode(e.re, n, posState) +} + +// writeOp writes a single operation to the range encoder. The function +// checks whether there is enough space available to close the LZMA +// stream. +func (e *encoder) writeOp(op operation) error { + if e.re.Available() < int64(e.margin) { + return ErrLimit + } + switch x := op.(type) { + case lit: + return e.writeLiteral(x) + case match: + return e.writeMatch(x) + default: + panic("unexpected operation") + } +} + +// compress compressed data from the dictionary buffer. If the flag all +// is set, all data in the dictionary buffer will be compressed. The +// function returns ErrLimit if the underlying writer has reached its +// limit. 
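+//
+// Editor's note: unless the all flag is set, compress leaves maxMatchLen-1
+// bytes buffered so the matcher always sees a full window of data; Close
+// passes the all flag to drain the remainder.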
+func (e *encoder) compress(flags compressFlags) error { + n := 0 + if flags&all == 0 { + n = maxMatchLen - 1 + } + d := e.dict + m := d.m + for d.Buffered() > n { + op := m.NextOp(e.state.rep) + if err := e.writeOp(op); err != nil { + return err + } + d.Discard(op.Len()) + } + return nil +} + +// eosMatch is a pseudo operation that indicates the end of the stream. +var eosMatch = match{distance: maxDistance, n: minMatchLen} + +// Close terminates the LZMA stream. If requested the end-of-stream +// marker will be written. If the byte writer limit has been or will be +// reached during compression of the remaining data in the buffer the +// LZMA stream will be closed and data will remain in the buffer. +func (e *encoder) Close() error { + err := e.compress(all) + if err != nil && err != ErrLimit { + return err + } + if e.marker { + if err := e.writeMatch(eosMatch); err != nil { + return err + } + } + err = e.re.Close() + return err +} + +// Compressed returns the number bytes of the input data that been +// compressed. +func (e *encoder) Compressed() int64 { + return e.dict.Pos() - e.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go new file mode 100644 index 000000000..4b3916eab --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go @@ -0,0 +1,149 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// matcher is an interface that supports the identification of the next +// operation. +type matcher interface { + io.Writer + SetDict(d *encoderDict) + NextOp(rep [4]uint32) operation +} + +// encoderDict provides the dictionary of the encoder. It includes an +// additional buffer atop of the actual dictionary. +type encoderDict struct { + buf buffer + m matcher + head int64 + capacity int + // preallocated array + data [maxMatchLen]byte +} + +// newEncoderDict creates the encoder dictionary. The argument bufSize +// defines the size of the additional buffer. +func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) { + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New( + "lzma: dictionary capacity out of range") + } + if bufSize < 1 { + return nil, errors.New( + "lzma: buffer size must be larger than zero") + } + d = &encoderDict{ + buf: *newBuffer(dictCap + bufSize), + capacity: dictCap, + m: m, + } + m.SetDict(d) + return d, nil +} + +// Discard discards n bytes. Note that n must not be larger than +// MaxMatchLen. +func (d *encoderDict) Discard(n int) { + p := d.data[:n] + k, _ := d.buf.Read(p) + if k < n { + panic(fmt.Errorf("lzma: can't discard %d bytes", n)) + } + d.head += int64(n) + d.m.Write(p) +} + +// Len returns the data available in the encoder dictionary. +func (d *encoderDict) Len() int { + n := d.buf.Available() + if int64(n) > d.head { + return int(d.head) + } + return n +} + +// DictLen returns the actual length of data in the dictionary. +func (d *encoderDict) DictLen() int { + if d.head < int64(d.capacity) { + return int(d.head) + } + return d.capacity +} + +// Available returns the number of bytes that can be written by a +// following Write call. +func (d *encoderDict) Available() int { + return d.buf.Available() - d.DictLen() +} + +// Write writes data into the dictionary buffer. Note that the position +// of the dictionary head will not be moved. 
If there is not enough +// space in the buffer ErrNoSpace will be returned. +func (d *encoderDict) Write(p []byte) (n int, err error) { + m := d.Available() + if len(p) > m { + p = p[:m] + err = ErrNoSpace + } + var e error + if n, e = d.buf.Write(p); e != nil { + err = e + } + return n, err +} + +// Pos returns the position of the head. +func (d *encoderDict) Pos() int64 { return d.head } + +// ByteAt returns the byte at the given distance. +func (d *encoderDict) ByteAt(distance int) byte { + if !(0 < distance && distance <= d.Len()) { + return 0 + } + i := d.buf.rear - distance + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// CopyN copies the last n bytes from the dictionary into the provided +// writer. This is used for copying uncompressed data into an +// uncompressed segment. +func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) { + if n <= 0 { + return 0, nil + } + m := d.Len() + if n > m { + n = m + err = ErrNoSpace + } + i := d.buf.rear - n + var e error + if i < 0 { + i += len(d.buf.data) + if written, e = w.Write(d.buf.data[i:]); e != nil { + return written, e + } + i = 0 + } + var k int + k, e = w.Write(d.buf.data[i:d.buf.rear]) + written += k + if e != nil { + err = e + } + return written, err +} + +// Buffered returns the number of bytes in the buffer. +func (d *encoderDict) Buffered() int { return d.buf.Buffered() } diff --git a/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma new file mode 100644 index 0000000000000000000000000000000000000000..5edad633266eb5173a7c39761dc8b9e71efbfe80 GIT binary patch literal 67 zcma!LU}#|Y4+RWbQXGqzRntCtR~%i$`d{za%}WYWYfXMUl6~Q5_UjH?=5CuO0w(I5 UuQ#VXelz{mI_3ZW`W7$%0HEw6g#Z8m literal 0 HcmV?d00001 diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go new file mode 100644 index 000000000..f66e9cdd9 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go @@ -0,0 +1,309 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + + "github.com/ulikunitz/xz/internal/hash" +) + +/* For compression we need to find byte sequences that match the byte + * sequence at the dictionary head. A hash table is a simple method to + * provide this capability. + */ + +// maxMatches limits the number of matches requested from the Matches +// function. This controls the speed of the overall encoding. +const maxMatches = 16 + +// shortDists defines the number of short distances supported by the +// implementation. +const shortDists = 8 + +// The minimum is somehow arbitrary but the maximum is limited by the +// memory requirements of the hash table. +const ( + minTableExponent = 9 + maxTableExponent = 20 +) + +// newRoller contains the function used to create an instance of the +// hash.Roller. +var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) } + +// hashTable stores the hash table including the rolling hash method. +// +// We implement chained hashing into a circular buffer. Each entry in +// the circular buffer stores the delta distance to the next position with a +// word that has the same hash value. 
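+//
+// For illustration (positions invented): if positions 7, 19 and 30
+// hash to the same slot, the table slot remembers the most recent
+// position 30, while the circular buffer stores the delta 11 for
+// position 30 and the delta 12 for position 19, so walking the chain
+// recovers the positions 30, 19 and 7.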
+type hashTable struct { + dict *encoderDict + // actual hash table + t []int64 + // circular list data with the offset to the next word + data []uint32 + front int + // mask for computing the index for the hash table + mask uint64 + // hash offset; initial value is -int64(wordLen) + hoff int64 + // length of the hashed word + wordLen int + // hash roller for computing the hash values for the Write + // method + wr hash.Roller + // hash roller for computing arbitrary hashes + hr hash.Roller + // preallocated slices + p [maxMatches]int64 + distances [maxMatches + shortDists]int +} + +// hashTableExponent derives the hash table exponent from the dictionary +// capacity. +func hashTableExponent(n uint32) int { + e := 30 - nlz32(n) + switch { + case e < minTableExponent: + e = minTableExponent + case e > maxTableExponent: + e = maxTableExponent + } + return e +} + +// newHashTable creates a new hash table for words of length wordLen +func newHashTable(capacity int, wordLen int) (t *hashTable, err error) { + if !(0 < capacity) { + return nil, errors.New( + "newHashTable: capacity must not be negative") + } + exp := hashTableExponent(uint32(capacity)) + if !(1 <= wordLen && wordLen <= 4) { + return nil, errors.New("newHashTable: " + + "argument wordLen out of range") + } + n := 1 << uint(exp) + if n <= 0 { + panic("newHashTable: exponent is too large") + } + t = &hashTable{ + t: make([]int64, n), + data: make([]uint32, capacity), + mask: (uint64(1) << uint(exp)) - 1, + hoff: -int64(wordLen), + wordLen: wordLen, + wr: newRoller(wordLen), + hr: newRoller(wordLen), + } + return t, nil +} + +func (t *hashTable) SetDict(d *encoderDict) { t.dict = d } + +// buffered returns the number of bytes that are currently hashed. +func (t *hashTable) buffered() int { + n := t.hoff + 1 + switch { + case n <= 0: + return 0 + case n >= int64(len(t.data)): + return len(t.data) + } + return int(n) +} + +// addIndex adds n to an index ensuring that is stays inside the +// circular buffer for the hash chain. +func (t *hashTable) addIndex(i, n int) int { + i += n - len(t.data) + if i < 0 { + i += len(t.data) + } + return i +} + +// putDelta puts the delta instance at the current front of the circular +// chain buffer. +func (t *hashTable) putDelta(delta uint32) { + t.data[t.front] = delta + t.front = t.addIndex(t.front, 1) +} + +// putEntry puts a new entry into the hash table. If there is already a +// value stored it is moved into the circular chain buffer. +func (t *hashTable) putEntry(h uint64, pos int64) { + if pos < 0 { + return + } + i := h & t.mask + old := t.t[i] - 1 + t.t[i] = pos + 1 + var delta int64 + if old >= 0 { + delta = pos - old + if delta > 1<<32-1 || delta > int64(t.buffered()) { + delta = 0 + } + } + t.putDelta(uint32(delta)) +} + +// WriteByte converts a single byte into a hash and puts them into the hash +// table. +func (t *hashTable) WriteByte(b byte) error { + h := t.wr.RollByte(b) + t.hoff++ + t.putEntry(h, t.hoff) + return nil +} + +// Write converts the bytes provided into hash tables and stores the +// abbreviated offsets into the hash table. The method will never return an +// error. +func (t *hashTable) Write(p []byte) (n int, err error) { + for _, b := range p { + // WriteByte doesn't generate an error. + t.WriteByte(b) + } + return len(p), nil +} + +// getMatches the matches for a specific hash. The functions returns the +// number of positions found. +// +// TODO: Make a getDistances because that we are actually interested in. 
+func (t *hashTable) getMatches(h uint64, positions []int64) (n int) { + if t.hoff < 0 || len(positions) == 0 { + return 0 + } + buffered := t.buffered() + tailPos := t.hoff + 1 - int64(buffered) + rear := t.front - buffered + if rear >= 0 { + rear -= len(t.data) + } + // get the slot for the hash + pos := t.t[h&t.mask] - 1 + delta := pos - tailPos + for { + if delta < 0 { + return n + } + positions[n] = tailPos + delta + n++ + if n >= len(positions) { + return n + } + i := rear + int(delta) + if i < 0 { + i += len(t.data) + } + u := t.data[i] + if u == 0 { + return n + } + delta -= int64(u) + } +} + +// hash computes the rolling hash for the word stored in p. For correct +// results its length must be equal to t.wordLen. +func (t *hashTable) hash(p []byte) uint64 { + var h uint64 + for _, b := range p { + h = t.hr.RollByte(b) + } + return h +} + +// Matches fills the positions slice with potential matches. The +// functions returns the number of positions filled into positions. The +// byte slice p must have word length of the hash table. +func (t *hashTable) Matches(p []byte, positions []int64) int { + if len(p) != t.wordLen { + panic(fmt.Errorf( + "byte slice must have length %d", t.wordLen)) + } + h := t.hash(p) + return t.getMatches(h, positions) +} + +// NextOp identifies the next operation using the hash table. +// +// TODO: Use all repetitions to find matches. +func (t *hashTable) NextOp(rep [4]uint32) operation { + // get positions + data := t.dict.data[:maxMatchLen] + n, _ := t.dict.buf.Peek(data) + data = data[:n] + var p []int64 + if n < t.wordLen { + p = t.p[:0] + } else { + p = t.p[:maxMatches] + n = t.Matches(data[:t.wordLen], p) + p = p[:n] + } + + // convert positions in potential distances + head := t.dict.head + dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8) + for _, pos := range p { + dis := int(head - pos) + if dis > shortDists { + dists = append(dists, dis) + } + } + + // check distances + var m match + dictLen := t.dict.DictLen() + for _, dist := range dists { + if dist > dictLen { + continue + } + + // Here comes a trick. We are only interested in matches + // that are longer than the matches we have been found + // before. So before we test the whole byte sequence at + // the given distance, we test the first byte that would + // make the match longer. If it doesn't match the byte + // to match, we don't to care any longer. + i := t.dict.buf.rear - dist + m.n + if i < 0 { + i += len(t.dict.buf.data) + } + if t.dict.buf.data[i] != data[m.n] { + // We can't get a longer match. Jump to the next + // distance. + continue + } + + n := t.dict.buf.matchLen(dist, data) + switch n { + case 0: + continue + case 1: + if uint32(dist-minDistance) != rep[0] { + continue + } + } + if n > m.n { + m = match{int64(dist), n} + if n == len(data) { + // No better match will be found. + break + } + } + } + + if m.n == 0 { + return lit{data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go new file mode 100644 index 000000000..1ae7d80ca --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header.go @@ -0,0 +1,167 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package lzma
+
+import (
+	"errors"
+	"fmt"
+)
+
+// uint32LE reads an uint32 integer from a byte slice
+func uint32LE(b []byte) uint32 {
+	x := uint32(b[3]) << 24
+	x |= uint32(b[2]) << 16
+	x |= uint32(b[1]) << 8
+	x |= uint32(b[0])
+	return x
+}
+
+// uint64LE converts the uint64 value stored as little endian to an uint64
+// value.
+func uint64LE(b []byte) uint64 {
+	x := uint64(b[7]) << 56
+	x |= uint64(b[6]) << 48
+	x |= uint64(b[5]) << 40
+	x |= uint64(b[4]) << 32
+	x |= uint64(b[3]) << 24
+	x |= uint64(b[2]) << 16
+	x |= uint64(b[1]) << 8
+	x |= uint64(b[0])
+	return x
+}
+
+// putUint32LE puts an uint32 integer into a byte slice that must have at least
+// a length of 4 bytes.
+func putUint32LE(b []byte, x uint32) {
+	b[0] = byte(x)
+	b[1] = byte(x >> 8)
+	b[2] = byte(x >> 16)
+	b[3] = byte(x >> 24)
+}
+
+// putUint64LE puts the uint64 value into the byte slice as little endian
+// value. The byte slice b must have at least place for 8 bytes.
+func putUint64LE(b []byte, x uint64) {
+	b[0] = byte(x)
+	b[1] = byte(x >> 8)
+	b[2] = byte(x >> 16)
+	b[3] = byte(x >> 24)
+	b[4] = byte(x >> 32)
+	b[5] = byte(x >> 40)
+	b[6] = byte(x >> 48)
+	b[7] = byte(x >> 56)
+}
+
+// noHeaderSize defines the value of the length field in the LZMA header.
+const noHeaderSize uint64 = 1<<64 - 1
+
+// HeaderLen provides the length of the LZMA file header.
+const HeaderLen = 13
+
+// header represents the header of an LZMA file.
+type header struct {
+	properties Properties
+	dictCap    int
+	// uncompressed size; negative value if no size is given
+	size int64
+}
+
+// marshalBinary marshals the header.
+func (h *header) marshalBinary() (data []byte, err error) {
+	if err = h.properties.verify(); err != nil {
+		return nil, err
+	}
+	if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) {
+		return nil, fmt.Errorf("lzma: DictCap %d out of range",
+			h.dictCap)
+	}
+
+	data = make([]byte, 13)
+
+	// property byte
+	data[0] = h.properties.Code()
+
+	// dictionary capacity
+	putUint32LE(data[1:5], uint32(h.dictCap))
+
+	// uncompressed size
+	var s uint64
+	if h.size > 0 {
+		s = uint64(h.size)
+	} else {
+		s = noHeaderSize
+	}
+	putUint64LE(data[5:], s)
+
+	return data, nil
+}
+
+// unmarshalBinary unmarshals the header.
+func (h *header) unmarshalBinary(data []byte) error {
+	if len(data) != HeaderLen {
+		return errors.New("lzma.unmarshalBinary: data has wrong length")
+	}
+
+	// properties
+	var err error
+	if h.properties, err = PropertiesForCode(data[0]); err != nil {
+		return err
+	}
+
+	// dictionary capacity
+	h.dictCap = int(uint32LE(data[1:]))
+	if h.dictCap < 0 {
+		return errors.New(
+			"LZMA header: dictionary capacity exceeds maximum " +
+				"integer")
+	}
+
+	// uncompressed size
+	s := uint64LE(data[5:])
+	if s == noHeaderSize {
+		h.size = -1
+	} else {
+		h.size = int64(s)
+		if h.size < 0 {
+			return errors.New(
+				"LZMA header: uncompressed size " +
+					"out of int64 range")
+		}
+	}
+
+	return nil
+}
+
+// validDictCap checks whether the dictionary capacity is correct. This
+// is used to weed out wrong file headers.
+func validDictCap(dictcap int) bool {
+	if int64(dictcap) == MaxDictCap {
+		return true
+	}
+	for n := uint(10); n < 32; n++ {
+		if dictcap == 1<<n {
+			return true
+		}
+		if dictcap == 1<<n+1<<(n-1) {
+			return true
+		}
+	}
+	return false
+}
+
+// ValidHeader checks for a valid LZMA file header. It allows only
+// dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If
+// there is an explicit size it must not exceed 256 GiB. The length of
+// the data argument must be HeaderLen.
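+//
+// A minimal probing sketch (assuming a reader r and ignoring error
+// handling):
+//
+//	buf := make([]byte, HeaderLen)
+//	io.ReadFull(r, buf)
+//	ok := ValidHeader(buf)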
+func ValidHeader(data []byte) bool { + var h header + if err := h.unmarshalBinary(data); err != nil { + return false + } + if !validDictCap(h.dictCap) { + return false + } + return h.size < 0 || h.size <= 1<<38 +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go new file mode 100644 index 000000000..081fc840b --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go @@ -0,0 +1,398 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +const ( + // maximum size of compressed data in a chunk + maxCompressed = 1 << 16 + // maximum size of uncompressed data in a chunk + maxUncompressed = 1 << 21 +) + +// chunkType represents the type of an LZMA2 chunk. Note that this +// value is an internal representation and no actual encoding of a LZMA2 +// chunk header. +type chunkType byte + +// Possible values for the chunk type. +const ( + // end of stream + cEOS chunkType = iota + // uncompressed; reset dictionary + cUD + // uncompressed; no reset of dictionary + cU + // LZMA compressed; no reset + cL + // LZMA compressed; reset state + cLR + // LZMA compressed; reset state; new property value + cLRN + // LZMA compressed; reset state; new property value; reset dictionary + cLRND +) + +// chunkTypeStrings provide a string representation for the chunk types. +var chunkTypeStrings = [...]string{ + cEOS: "EOS", + cU: "U", + cUD: "UD", + cL: "L", + cLR: "LR", + cLRN: "LRN", + cLRND: "LRND", +} + +// String returns a string representation of the chunk type. +func (c chunkType) String() string { + if !(cEOS <= c && c <= cLRND) { + return "unknown" + } + return chunkTypeStrings[c] +} + +// Actual encodings for the chunk types in the value. Note that the high +// uncompressed size bits are stored in the header byte additionally. +const ( + hEOS = 0 + hUD = 1 + hU = 2 + hL = 1 << 7 + hLR = 1<<7 | 1<<5 + hLRN = 1<<7 | 1<<6 + hLRND = 1<<7 | 1<<6 | 1<<5 +) + +// errHeaderByte indicates an unsupported value for the chunk header +// byte. These bytes starts the variable-length chunk header. +var errHeaderByte = errors.New("lzma: unsupported chunk header byte") + +// headerChunkType converts the header byte into a chunk type. It +// ignores the uncompressed size bits in the chunk header byte. +func headerChunkType(h byte) (c chunkType, err error) { + if h&hL == 0 { + // no compression + switch h { + case hEOS: + c = cEOS + case hUD: + c = cUD + case hU: + c = cU + default: + return 0, errHeaderByte + } + return + } + switch h & hLRND { + case hL: + c = cL + case hLR: + c = cLR + case hLRN: + c = cLRN + case hLRND: + c = cLRND + default: + return 0, errHeaderByte + } + return +} + +// uncompressedHeaderLen provides the length of an uncompressed header +const uncompressedHeaderLen = 3 + +// headerLen returns the length of the LZMA2 header for a given chunk +// type. +func headerLen(c chunkType) int { + switch c { + case cEOS: + return 1 + case cU, cUD: + return uncompressedHeaderLen + case cL, cLR: + return 5 + case cLRN, cLRND: + return 6 + } + panic(fmt.Errorf("unsupported chunk type %d", c)) +} + +// chunkHeader represents the contents of a chunk header. +type chunkHeader struct { + ctype chunkType + uncompressed uint32 + compressed uint16 + props Properties +} + +// String returns a string representation of the chunk header. 
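+//
+// For example, a cLRND chunk with uncompressed size 100, compressed
+// size 50 and the default properties prints as
+// "LRND 100 50 LC 3 LP 0 PB 2".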
+func (h *chunkHeader) String() string { + return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed, + h.compressed, &h.props) +} + +// UnmarshalBinary reads the content of the chunk header from the data +// slice. The slice must have the correct length. +func (h *chunkHeader) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return errors.New("no data") + } + c, err := headerChunkType(data[0]) + if err != nil { + return err + } + + n := headerLen(c) + if len(data) < n { + return errors.New("incomplete data") + } + if len(data) > n { + return errors.New("invalid data length") + } + + *h = chunkHeader{ctype: c} + if c == cEOS { + return nil + } + + h.uncompressed = uint32(uint16BE(data[1:3])) + if c <= cU { + return nil + } + h.uncompressed |= uint32(data[0]&^hLRND) << 16 + + h.compressed = uint16BE(data[3:5]) + if c <= cLR { + return nil + } + + h.props, err = PropertiesForCode(data[5]) + return err +} + +// MarshalBinary encodes the chunk header value. The function checks +// whether the content of the chunk header is correct. +func (h *chunkHeader) MarshalBinary() (data []byte, err error) { + if h.ctype > cLRND { + return nil, errors.New("invalid chunk type") + } + if err = h.props.verify(); err != nil { + return nil, err + } + + data = make([]byte, headerLen(h.ctype)) + + switch h.ctype { + case cEOS: + return data, nil + case cUD: + data[0] = hUD + case cU: + data[0] = hU + case cL: + data[0] = hL + case cLR: + data[0] = hLR + case cLRN: + data[0] = hLRN + case cLRND: + data[0] = hLRND + } + + putUint16BE(data[1:3], uint16(h.uncompressed)) + if h.ctype <= cU { + return data, nil + } + data[0] |= byte(h.uncompressed>>16) &^ hLRND + + putUint16BE(data[3:5], h.compressed) + if h.ctype <= cLR { + return data, nil + } + + data[5] = h.props.Code() + return data, nil +} + +// readChunkHeader reads the chunk header from the IO reader. +func readChunkHeader(r io.Reader) (h *chunkHeader, err error) { + p := make([]byte, 1, 6) + if _, err = io.ReadFull(r, p); err != nil { + return + } + c, err := headerChunkType(p[0]) + if err != nil { + return + } + p = p[:headerLen(c)] + if _, err = io.ReadFull(r, p[1:]); err != nil { + return + } + h = new(chunkHeader) + if err = h.UnmarshalBinary(p); err != nil { + return nil, err + } + return h, nil +} + +// uint16BE converts a big-endian uint16 representation to an uint16 +// value. +func uint16BE(p []byte) uint16 { + return uint16(p[0])<<8 | uint16(p[1]) +} + +// putUint16BE puts the big-endian uint16 presentation into the given +// slice. 
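+//
+// For example, putUint16BE(p, 0x1234) stores 0x12 in p[0] and 0x34 in
+// p[1].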
+func putUint16BE(p []byte, x uint16) { + p[0] = byte(x >> 8) + p[1] = byte(x) +} + +// chunkState is used to manage the state of the chunks +type chunkState byte + +// start and stop define the initial and terminating state of the chunk +// state +const ( + start chunkState = 'S' + stop chunkState = 'T' +) + +// errors for the chunk state handling +var ( + errChunkType = errors.New("lzma: unexpected chunk type") + errState = errors.New("lzma: wrong chunk state") +) + +// next transitions state based on chunk type input +func (c *chunkState) next(ctype chunkType) error { + switch *c { + // start state + case 'S': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cLRND: + *c = 'L' + default: + return errChunkType + } + // normal LZMA mode + case 'L': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + *c = 'U' + case cL, cLR, cLRN, cLRND: + break + default: + return errChunkType + } + // reset required + case 'R': + switch ctype { + case cEOS: + *c = 'T' + case cUD, cU: + break + case cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // uncompressed + case 'U': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + break + case cL, cLR, cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // terminal state + case 'T': + return errChunkType + default: + return errState + } + return nil +} + +// defaultChunkType returns the default chunk type for each chunk state. +func (c chunkState) defaultChunkType() chunkType { + switch c { + case 'S': + return cLRND + case 'L', 'U': + return cL + case 'R': + return cLRN + default: + // no error + return cEOS + } +} + +// maxDictCap defines the maximum dictionary capacity supported by the +// LZMA2 dictionary capacity encoding. +const maxDictCap = 1<<32 - 1 + +// maxDictCapCode defines the maximum dictionary capacity code. +const maxDictCapCode = 40 + +// The function decodes the dictionary capacity byte, but doesn't change +// for the correct range of the given byte. +func decodeDictCap(c byte) int64 { + return (2 | int64(c)&1) << (11 + (c>>1)&0x1f) +} + +// DecodeDictCap decodes the encoded dictionary capacity. The function +// returns an error if the code is out of range. +func DecodeDictCap(c byte) (n int64, err error) { + if c >= maxDictCapCode { + if c == maxDictCapCode { + return maxDictCap, nil + } + return 0, errors.New("lzma: invalid dictionary size code") + } + return decodeDictCap(c), nil +} + +// EncodeDictCap encodes a dictionary capacity. The function returns the +// code for the capacity that is greater or equal n. If n exceeds the +// maximum support dictionary capacity, the maximum value is returned. +func EncodeDictCap(n int64) byte { + a, b := byte(0), byte(40) + for a < b { + c := a + (b-a)>>1 + m := decodeDictCap(c) + if n <= m { + if n == m { + return c + } + b = c + } else { + a = c + 1 + } + } + return a +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go new file mode 100644 index 000000000..1ea5320a0 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go @@ -0,0 +1,115 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "errors" + +// maxPosBits defines the number of bits of the position value that are used to +// to compute the posState value. 
The value is used to select the tree codec +// for length encoding and decoding. +const maxPosBits = 4 + +// minMatchLen and maxMatchLen give the minimum and maximum values for +// encoding and decoding length values. minMatchLen is also used as base +// for the encoded length values. +const ( + minMatchLen = 2 + maxMatchLen = minMatchLen + 16 + 256 - 1 +) + +// lengthCodec support the encoding of the length value. +type lengthCodec struct { + choice [2]prob + low [1 << maxPosBits]treeCodec + mid [1 << maxPosBits]treeCodec + high treeCodec +} + +// deepcopy initializes the lc value as deep copy of the source value. +func (lc *lengthCodec) deepcopy(src *lengthCodec) { + if lc == src { + return + } + lc.choice = src.choice + for i := range lc.low { + lc.low[i].deepcopy(&src.low[i]) + } + for i := range lc.mid { + lc.mid[i].deepcopy(&src.mid[i]) + } + lc.high.deepcopy(&src.high) +} + +// init initializes a new length codec. +func (lc *lengthCodec) init() { + for i := range lc.choice { + lc.choice[i] = probInit + } + for i := range lc.low { + lc.low[i] = makeTreeCodec(3) + } + for i := range lc.mid { + lc.mid[i] = makeTreeCodec(3) + } + lc.high = makeTreeCodec(8) +} + +// Encode encodes the length offset. The length offset l can be compute by +// subtracting minMatchLen (2) from the actual length. +// +// l = length - minMatchLen +func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32, +) (err error) { + if l > maxMatchLen-minMatchLen { + return errors.New("lengthCodec.Encode: l out of range") + } + if l < 8 { + if err = lc.choice[0].Encode(e, 0); err != nil { + return + } + return lc.low[posState].Encode(e, l) + } + if err = lc.choice[0].Encode(e, 1); err != nil { + return + } + if l < 16 { + if err = lc.choice[1].Encode(e, 0); err != nil { + return + } + return lc.mid[posState].Encode(e, l-8) + } + if err = lc.choice[1].Encode(e, 1); err != nil { + return + } + if err = lc.high.Encode(e, l-16); err != nil { + return + } + return nil +} + +// Decode reads the length offset. Add minMatchLen to compute the actual length +// to the length offset l. +func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32, +) (l uint32, err error) { + var b uint32 + if b, err = lc.choice[0].Decode(d); err != nil { + return + } + if b == 0 { + l, err = lc.low[posState].Decode(d) + return + } + if b, err = lc.choice[1].Decode(d); err != nil { + return + } + if b == 0 { + l, err = lc.mid[posState].Decode(d) + l += 8 + return + } + l, err = lc.high.Decode(d) + l += 16 + return +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go new file mode 100644 index 000000000..e4ef5fc59 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go @@ -0,0 +1,125 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// literalCodec supports the encoding of literal. It provides 768 probability +// values per literal state. The upper 512 probabilities are used with the +// context of a match bit. +type literalCodec struct { + probs []prob +} + +// deepcopy initializes literal codec c as a deep copy of the source. +func (c *literalCodec) deepcopy(src *literalCodec) { + if c == src { + return + } + c.probs = make([]prob, len(src.probs)) + copy(c.probs, src.probs) +} + +// init initializes the literal codec. 
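+//
+// There are 1<<(lc+lp) literal states, so init allocates 0x300 (768)
+// probability values for each of them.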
+func (c *literalCodec) init(lc, lp int) {
+	switch {
+	case !(minLC <= lc && lc <= maxLC):
+		panic("lc out of range")
+	case !(minLP <= lp && lp <= maxLP):
+		panic("lp out of range")
+	}
+	c.probs = make([]prob, 0x300<<uint(lc+lp))
+	for i := range c.probs {
+		c.probs[i] = probInit
+	}
+}
+
+// Encode encodes the byte b using a range encoder as well as the current LZMA
+// encoder state, a match byte and the literal state.
+func (c *literalCodec) Encode(e *rangeEncoder, b byte,
+	state uint32, match byte, litState uint32,
+) (err error) {
+	k := litState * 0x300
+	probs := c.probs[k : k+0x300]
+	symbol := uint32(1)
+	r := uint32(b)
+	if state >= 7 {
+		m := uint32(match)
+		for {
+			matchBit := (m >> 7) & 1
+			m <<= 1
+			bit := (r >> 7) & 1
+			r <<= 1
+			i := ((1 + matchBit) << 8) | symbol
+			if err = probs[i].Encode(e, bit); err != nil {
+				return
+			}
+			symbol = (symbol << 1) | bit
+			if matchBit != bit {
+				break
+			}
+			if symbol >= 0x100 {
+				break
+			}
+		}
+	}
+	for symbol < 0x100 {
+		bit := (r >> 7) & 1
+		r <<= 1
+		if err = probs[symbol].Encode(e, bit); err != nil {
+			return
+		}
+		symbol = (symbol << 1) | bit
+	}
+	return nil
+}
+
+// Decode decodes a literal byte using the range decoder as well as the LZMA
+// state, a match byte, and the literal state.
+func (c *literalCodec) Decode(d *rangeDecoder,
+	state uint32, match byte, litState uint32,
+) (s byte, err error) {
+	k := litState * 0x300
+	probs := c.probs[k : k+0x300]
+	symbol := uint32(1)
+	if state >= 7 {
+		m := uint32(match)
+		for {
+			matchBit := (m >> 7) & 1
+			m <<= 1
+			i := ((1 + matchBit) << 8) | symbol
+			bit, err := d.DecodeBit(&probs[i])
+			if err != nil {
+				return 0, err
+			}
+			symbol = (symbol << 1) | bit
+			if matchBit != bit {
+				break
+			}
+			if symbol >= 0x100 {
+				break
+			}
+		}
+	}
+	for symbol < 0x100 {
+		bit, err := d.DecodeBit(&probs[symbol])
+		if err != nil {
+			return 0, err
+		}
+		symbol = (symbol << 1) | bit
+	}
+	s = byte(symbol - 0x100)
+	return s, nil
+}
+
+// minLC and maxLC define the range for LC values.
+const (
+	minLC = 0
+	maxLC = 8
+)
+
+// minLP and maxLP define the range for LP values.
+const (
+	minLP = 0
+	maxLP = 4
+)
diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
new file mode 100644
index 000000000..02dfb8bf5
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
@@ -0,0 +1,52 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import "errors"
+
+// MatchAlgorithm identifies an algorithm to find matches in the
+// dictionary.
+type MatchAlgorithm byte
+
+// Supported matcher algorithms.
+const (
+	HashTable4 MatchAlgorithm = iota
+	BinaryTree
+)
+
+// maStrings are used by the String method.
+var maStrings = map[MatchAlgorithm]string{
+	HashTable4: "HashTable4",
+	BinaryTree: "BinaryTree",
+}
+
+// String returns a string representation of the Matcher.
+func (a MatchAlgorithm) String() string {
+	if s, ok := maStrings[a]; ok {
+		return s
+	}
+	return "unknown"
+}
+
+var errUnsupportedMatchAlgorithm = errors.New(
+	"lzma: unsupported match algorithm value")
+
+// verify checks whether the matcher value is supported.
+func (a MatchAlgorithm) verify() error {
+	if _, ok := maStrings[a]; !ok {
+		return errUnsupportedMatchAlgorithm
+	}
+	return nil
+}
+
+func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) {
+	switch a {
+	case HashTable4:
+		return newHashTable(dictCap, 4)
+	case BinaryTree:
+		return newBinTree(dictCap)
+	}
+	return nil, errUnsupportedMatchAlgorithm
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go
new file mode 100644
index 000000000..7b7eddc3d
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go
@@ -0,0 +1,55 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "unicode" +) + +// operation represents an operation on the dictionary during encoding or +// decoding. +type operation interface { + Len() int +} + +// rep represents a repetition at the given distance and the given length +type match struct { + // supports all possible distance values, including the eos marker + distance int64 + // length + n int +} + +// Len returns the number of bytes matched. +func (m match) Len() int { + return m.n +} + +// String returns a string representation for the repetition. +func (m match) String() string { + return fmt.Sprintf("M{%d,%d}", m.distance, m.n) +} + +// lit represents a single byte literal. +type lit struct { + b byte +} + +// Len returns 1 for the single byte literal. +func (l lit) Len() int { + return 1 +} + +// String returns a string representation for the literal. +func (l lit) String() string { + var c byte + if unicode.IsPrint(rune(l.b)) { + c = l.b + } else { + c = '.' + } + return fmt.Sprintf("L{%c/%02x}", c, l.b) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go new file mode 100644 index 000000000..2feccba11 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go @@ -0,0 +1,53 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// movebits defines the number of bits used for the updates of probability +// values. +const movebits = 5 + +// probbits defines the number of bits of a probability value. +const probbits = 11 + +// probInit defines 0.5 as initial value for prob values. +const probInit prob = 1 << (probbits - 1) + +// Type prob represents probabilities. The type can also be used to encode and +// decode single bits. +type prob uint16 + +// Dec decreases the probability. The decrease is proportional to the +// probability value. +func (p *prob) dec() { + *p -= *p >> movebits +} + +// Inc increases the probability. The Increase is proportional to the +// difference of 1 and the probability value. +func (p *prob) inc() { + *p += ((1 << probbits) - *p) >> movebits +} + +// Computes the new bound for a given range using the probability value. +func (p prob) bound(r uint32) uint32 { + return (r >> probbits) * uint32(p) +} + +// Bits returns 1. One is the number of bits that can be encoded or decoded +// with a single prob value. +func (p prob) Bits() int { + return 1 +} + +// Encode encodes the least-significant bit of v. Note that the p value will be +// changed. +func (p *prob) Encode(e *rangeEncoder, v uint32) error { + return e.EncodeBit(v, p) +} + +// Decode decodes a single bit. Note that the p value will change. +func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) { + return d.DecodeBit(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go new file mode 100644 index 000000000..15b754ccb --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go @@ -0,0 +1,69 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// maximum and minimum values for the LZMA properties. 
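+//
+// A properties byte encodes the triple as (PB*5+LP)*9+LC; the common
+// default LC=3, LP=0, PB=2 therefore yields the code byte 93 (0x5d).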
+const ( + minPB = 0 + maxPB = 4 +) + +// maxPropertyCode is the possible maximum of a properties code byte. +const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1 + +// Properties contains the parameters LC, LP and PB. The parameter LC +// defines the number of literal context bits; parameter LP the number +// of literal position bits and PB the number of position bits. +type Properties struct { + LC int + LP int + PB int +} + +// String returns the properties in a string representation. +func (p *Properties) String() string { + return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB) +} + +// PropertiesForCode converts a properties code byte into a Properties value. +func PropertiesForCode(code byte) (p Properties, err error) { + if code > maxPropertyCode { + return p, errors.New("lzma: invalid properties code") + } + p.LC = int(code % 9) + code /= 9 + p.LP = int(code % 5) + code /= 5 + p.PB = int(code % 5) + return p, err +} + +// verify checks the properties for correctness. +func (p *Properties) verify() error { + if p == nil { + return errors.New("lzma: properties are nil") + } + if !(minLC <= p.LC && p.LC <= maxLC) { + return errors.New("lzma: lc out of range") + } + if !(minLP <= p.LP && p.LP <= maxLP) { + return errors.New("lzma: lp out of range") + } + if !(minPB <= p.PB && p.PB <= maxPB) { + return errors.New("lzma: pb out of range") + } + return nil +} + +// Code converts the properties to a byte. The function assumes that +// the properties components are all in range. +func (p Properties) Code() byte { + return byte((p.PB*5+p.LP)*9 + p.LC) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go new file mode 100644 index 000000000..4b0fee3ff --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -0,0 +1,222 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// rangeEncoder implements range encoding of single bits. The low value can +// overflow therefore we need uint64. The cache value is used to handle +// overflows. +type rangeEncoder struct { + lbw *LimitedByteWriter + nrange uint32 + low uint64 + cacheLen int64 + cache byte +} + +// maxInt64 provides the maximal value of the int64 type +const maxInt64 = 1<<63 - 1 + +// newRangeEncoder creates a new range encoder. +func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) { + lbw, ok := bw.(*LimitedByteWriter) + if !ok { + lbw = &LimitedByteWriter{BW: bw, N: maxInt64} + } + return &rangeEncoder{ + lbw: lbw, + nrange: 0xffffffff, + cacheLen: 1}, nil +} + +// Available returns the number of bytes that still can be written. The +// method takes the bytes that will be currently written by Close into +// account. +func (e *rangeEncoder) Available() int64 { + return e.lbw.N - (e.cacheLen + 4) +} + +// writeByte writes a single byte to the underlying writer. An error is +// returned if the limit is reached. The written byte will be counted if +// the underlying writer doesn't return an error. +func (e *rangeEncoder) writeByte(c byte) error { + if e.Available() < 1 { + return ErrLimit + } + return e.lbw.WriteByte(c) +} + +// DirectEncodeBit encodes the least-significant bit of b with probability 1/2. 
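+//
+// No probability is adapted for direct bits: the range is halved and,
+// for a one bit, the halved range is added to low so that the upper
+// half of the code space is selected.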
+func (e *rangeEncoder) DirectEncodeBit(b uint32) error { + e.nrange >>= 1 + e.low += uint64(e.nrange) & (0 - (uint64(b) & 1)) + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// EncodeBit encodes the least significant bit of b. The p value will be +// updated by the function depending on the bit encoded. +func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error { + bound := p.bound(e.nrange) + if b&1 == 0 { + e.nrange = bound + p.inc() + } else { + e.low += uint64(bound) + e.nrange -= bound + p.dec() + } + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// Close writes a complete copy of the low value. +func (e *rangeEncoder) Close() error { + for i := 0; i < 5; i++ { + if err := e.shiftLow(); err != nil { + return err + } + } + return nil +} + +// shiftLow shifts the low value for 8 bit. The shifted byte is written into +// the byte writer. The cache value is used to handle overflows. +func (e *rangeEncoder) shiftLow() error { + if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 { + tmp := e.cache + for { + err := e.writeByte(tmp + byte(e.low>>32)) + if err != nil { + return err + } + tmp = 0xff + e.cacheLen-- + if e.cacheLen <= 0 { + if e.cacheLen < 0 { + panic("negative cacheLen") + } + break + } + } + e.cache = byte(uint32(e.low) >> 24) + } + e.cacheLen++ + e.low = uint64(uint32(e.low) << 8) + return nil +} + +// rangeDecoder decodes single bits of the range encoding stream. +type rangeDecoder struct { + br io.ByteReader + nrange uint32 + code uint32 +} + +// newRangeDecoder initializes a range decoder. It reads five bytes from the +// reader and therefore may return an error. +func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) { + d = &rangeDecoder{br: br, nrange: 0xffffffff} + + b, err := d.br.ReadByte() + if err != nil { + return nil, err + } + if b != 0 { + return nil, errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return nil, err + } + } + + if d.code >= d.nrange { + return nil, errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return d, nil +} + +// possiblyAtEnd checks whether the decoder may be at the end of the stream. +func (d *rangeDecoder) possiblyAtEnd() bool { + return d.code == 0 +} + +// DirectDecodeBit decodes a bit with probability 1/2. The return value b will +// contain the bit at the least-significant position. All other bits will be +// zero. +func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) { + d.nrange >>= 1 + d.code -= d.nrange + t := 0 - (d.code >> 31) + d.code += d.nrange & t + b = (t + 1) & 1 + + // d.code will stay less then d.nrange + + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// decodeBit decodes a single bit. The bit will be returned at the +// least-significant position. All other bits will be zero. The probability +// value will be updated. 
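+//
+// As a worked example, with nrange = 0xffffffff and p = probInit (1024
+// out of 2048), bound = (nrange>>11)*1024, which is roughly half the
+// range, so a fresh decoder splits the code space evenly.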
+func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) { + bound := p.bound(d.nrange) + if d.code < bound { + d.nrange = bound + p.inc() + b = 0 + } else { + d.code -= bound + d.nrange -= bound + p.dec() + b = 1 + } + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// updateCode reads a new byte into the code. +func (d *rangeDecoder) updateCode() error { + b, err := d.br.ReadByte() + if err != nil { + return err + } + d.code = (d.code << 8) | uint32(b) + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go new file mode 100644 index 000000000..ae911c389 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go @@ -0,0 +1,100 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lzma supports the decoding and encoding of LZMA streams. +// Reader and Writer support the classic LZMA format. Reader2 and +// Writer2 support the decoding and encoding of LZMA2 streams. +// +// The package is written completely in Go and doesn't rely on any external +// library. +package lzma + +import ( + "errors" + "io" +) + +// ReaderConfig stores the parameters for the reader of the classic LZMA +// format. +type ReaderConfig struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero values will +// be replaced by default values. +func (c *ReaderConfig) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader provides a reader for LZMA files or streams. +type Reader struct { + lzma io.Reader + h header + d *decoder +} + +// NewReader creates a new reader for an LZMA stream using the classic +// format. NewReader reads and checks the header of the LZMA stream. +func NewReader(lzma io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(lzma) +} + +// NewReader creates a new reader for an LZMA stream in the classic +// format. The function reads and verifies the the header of the LZMA +// stream. +func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(lzma, data); err != nil { + if err == io.EOF { + return nil, errors.New("lzma: unexpected EOF") + } + return nil, err + } + r = &Reader{lzma: lzma} + if err = r.h.unmarshalBinary(data); err != nil { + return nil, err + } + if r.h.dictCap < MinDictCap { + r.h.dictCap = MinDictCap + } + dictCap := r.h.dictCap + if c.DictCap > dictCap { + dictCap = c.DictCap + } + + state := newState(r.h.properties) + dict, err := newDecoderDict(dictCap) + if err != nil { + return nil, err + } + r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size) + if err != nil { + return nil, err + } + return r, nil +} + +// EOSMarker indicates that an EOS marker has been encountered. +func (r *Reader) EOSMarker() bool { + return r.d.eosMarker +} + +// Read returns uncompressed data. 
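+//
+// A minimal decompression sketch (error handling elided):
+//
+//	r, err := lzma.NewReader(compressed)
+//	if err == nil {
+//		io.Copy(dst, r)
+//	}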
+func (r *Reader) Read(p []byte) (n int, err error) { + return r.d.Read(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go new file mode 100644 index 000000000..f36e26505 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go @@ -0,0 +1,231 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" + + "github.com/ulikunitz/xz/internal/xlog" +) + +// Reader2Config stores the parameters for the LZMA2 reader. +// format. +type Reader2Config struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *Reader2Config) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero configuration values +// will be replaced by default values. +func (c *Reader2Config) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader2 supports the reading of LZMA2 chunk sequences. Note that the +// first chunk should have a dictionary reset and the first compressed +// chunk a properties reset. The chunk sequence may not be terminated by +// an end-of-stream chunk. +type Reader2 struct { + r io.Reader + err error + + dict *decoderDict + ur *uncompressedReader + decoder *decoder + chunkReader io.Reader + + cstate chunkState +} + +// NewReader2 creates a reader for an LZMA2 chunk sequence. +func NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + return Reader2Config{}.NewReader2(lzma2) +} + +// NewReader2 creates an LZMA2 reader using the given configuration. +func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader2{r: lzma2, cstate: start} + r.dict, err = newDecoderDict(c.DictCap) + if err != nil { + return nil, err + } + if err = r.startChunk(); err != nil { + r.err = err + } + return r, nil +} + +// uncompressed tests whether the chunk type specifies an uncompressed +// chunk. +func uncompressed(ctype chunkType) bool { + return ctype == cU || ctype == cUD +} + +// startChunk parses a new chunk. 
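+//
+// startChunk also advances the chunk state machine declared in
+// header2.go, so an out-of-order chunk type (for example a compressed
+// chunk before any dictionary reset) is rejected here with
+// errChunkType.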
+func (r *Reader2) startChunk() error { + r.chunkReader = nil + header, err := readChunkHeader(r.r) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + xlog.Debugf("chunk header %v", header) + if err = r.cstate.next(header.ctype); err != nil { + return err + } + if r.cstate == stop { + return io.EOF + } + if header.ctype == cUD || header.ctype == cLRND { + r.dict.Reset() + } + size := int64(header.uncompressed) + 1 + if uncompressed(header.ctype) { + if r.ur != nil { + r.ur.Reopen(r.r, size) + } else { + r.ur = newUncompressedReader(r.r, r.dict, size) + } + r.chunkReader = r.ur + return nil + } + br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1)) + if r.decoder == nil { + state := newState(header.props) + r.decoder, err = newDecoder(br, state, r.dict, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil + } + switch header.ctype { + case cLR: + r.decoder.State.Reset() + case cLRN, cLRND: + r.decoder.State = newState(header.props) + } + err = r.decoder.Reopen(br, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil +} + +// Read reads data from the LZMA2 chunk sequence. +func (r *Reader2) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + for n < len(p) { + var k int + k, err = r.chunkReader.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + err = r.startChunk() + if err == nil { + continue + } + } + r.err = err + return n, err + } + if k == 0 { + r.err = errors.New("lzma: Reader2 doesn't get data") + return n, r.err + } + } + return n, nil +} + +// EOS returns whether the LZMA2 stream has been terminated by an +// end-of-stream chunk. +func (r *Reader2) EOS() bool { + return r.cstate == stop +} + +// uncompressedReader is used to read uncompressed chunks. +type uncompressedReader struct { + lr io.LimitedReader + Dict *decoderDict + eof bool + err error +} + +// newUncompressedReader initializes a new uncompressedReader. +func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader { + ur := &uncompressedReader{ + lr: io.LimitedReader{R: r, N: size}, + Dict: dict, + } + return ur +} + +// Reopen reinitializes an uncompressed reader. +func (ur *uncompressedReader) Reopen(r io.Reader, size int64) { + ur.err = nil + ur.eof = false + ur.lr = io.LimitedReader{R: r, N: size} +} + +// fill reads uncompressed data into the dictionary. +func (ur *uncompressedReader) fill() error { + if !ur.eof { + n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available())) + if err != io.EOF { + return err + } + ur.eof = true + if n > 0 { + return nil + } + } + if ur.lr.N != 0 { + return io.ErrUnexpectedEOF + } + return io.EOF +} + +// Read reads uncompressed data from the limited reader. +func (ur *uncompressedReader) Read(p []byte) (n int, err error) { + if ur.err != nil { + return 0, ur.err + } + for { + var k int + k, err = ur.Dict.Read(p[n:]) + n += k + if n >= len(p) { + return n, nil + } + if err != nil { + break + } + err = ur.fill() + if err != nil { + break + } + } + ur.err = err + return n, err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go new file mode 100644 index 000000000..34779c513 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -0,0 +1,145 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package lzma + +// states defines the overall state count +const states = 12 + +// State maintains the full state of the operation encoding or decoding +// process. +type state struct { + rep [4]uint32 + isMatch [states << maxPosBits]prob + isRepG0Long [states << maxPosBits]prob + isRep [states]prob + isRepG0 [states]prob + isRepG1 [states]prob + isRepG2 [states]prob + litCodec literalCodec + lenCodec lengthCodec + repLenCodec lengthCodec + distCodec distCodec + state uint32 + posBitMask uint32 + Properties Properties +} + +// initProbSlice initializes a slice of probabilities. +func initProbSlice(p []prob) { + for i := range p { + p[i] = probInit + } +} + +// Reset sets all state information to the original values. +func (s *state) Reset() { + p := s.Properties + *s = state{ + Properties: p, + // dict: s.dict, + posBitMask: (uint32(1) << uint(p.PB)) - 1, + } + initProbSlice(s.isMatch[:]) + initProbSlice(s.isRep[:]) + initProbSlice(s.isRepG0[:]) + initProbSlice(s.isRepG1[:]) + initProbSlice(s.isRepG2[:]) + initProbSlice(s.isRepG0Long[:]) + s.litCodec.init(p.LC, p.LP) + s.lenCodec.init() + s.repLenCodec.init() + s.distCodec.init() +} + +// newState creates a new state from the give Properties. +func newState(p Properties) *state { + s := &state{Properties: p} + s.Reset() + return s +} + +// deepcopy initializes s as a deep copy of the source. +func (s *state) deepcopy(src *state) { + if s == src { + return + } + s.rep = src.rep + s.isMatch = src.isMatch + s.isRepG0Long = src.isRepG0Long + s.isRep = src.isRep + s.isRepG0 = src.isRepG0 + s.isRepG1 = src.isRepG1 + s.isRepG2 = src.isRepG2 + s.litCodec.deepcopy(&src.litCodec) + s.lenCodec.deepcopy(&src.lenCodec) + s.repLenCodec.deepcopy(&src.repLenCodec) + s.distCodec.deepcopy(&src.distCodec) + s.state = src.state + s.posBitMask = src.posBitMask + s.Properties = src.Properties +} + +// cloneState creates a new clone of the give state. +func cloneState(src *state) *state { + s := new(state) + s.deepcopy(src) + return s +} + +// updateStateLiteral updates the state for a literal. +func (s *state) updateStateLiteral() { + switch { + case s.state < 4: + s.state = 0 + return + case s.state < 10: + s.state -= 3 + return + } + s.state -= 6 +} + +// updateStateMatch updates the state for a match. +func (s *state) updateStateMatch() { + if s.state < 7 { + s.state = 7 + } else { + s.state = 10 + } +} + +// updateStateRep updates the state for a repetition. +func (s *state) updateStateRep() { + if s.state < 7 { + s.state = 8 + } else { + s.state = 11 + } +} + +// updateStateShortRep updates the state for a short repetition. +func (s *state) updateStateShortRep() { + if s.state < 7 { + s.state = 9 + } else { + s.state = 11 + } +} + +// states computes the states of the operation codec. +func (s *state) states(dictHead int64) (state1, state2, posState uint32) { + state1 = s.state + posState = uint32(dictHead) & s.posBitMask + state2 = (s.state << maxPosBits) | posState + return +} + +// litState computes the literal state. +func (s *state) litState(prev byte, dictHead int64) uint32 { + lp, lc := uint(s.Properties.LP), uint(s.Properties.LC) + litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) | + (uint32(prev) >> (8 - lc)) + return litState +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go new file mode 100644 index 000000000..36b29b598 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go @@ -0,0 +1,133 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// treeCodec encodes or decodes values with a fixed bit size. It is using a
+// tree of probability value. The root of the tree is the most-significant bit.
+type treeCodec struct {
+	probTree
+}
+
+// makeTreeCodec makes a tree codec. The bits value must be inside the range
+// [1,32].
+func makeTreeCodec(bits int) treeCodec {
+	return treeCodec{makeProbTree(bits)}
+}
+
+// deepcopy initializes tc as a deep copy of the source.
+func (tc *treeCodec) deepcopy(src *treeCodec) {
+	tc.probTree.deepcopy(&src.probTree)
+}
+
+// Encode uses the range encoder to encode a fixed-bit-size value.
+func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) {
+	m := uint32(1)
+	for i := int(tc.bits) - 1; i >= 0; i-- {
+		b := (v >> uint(i)) & 1
+		if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
+			return err
+		}
+		m = (m << 1) | b
+	}
+	return nil
+}
+
+// Decodes uses the range decoder to decode a fixed-bit-size value. Errors may
+// be caused by the range decoder.
+func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) {
+	m := uint32(1)
+	for j := 0; j < int(tc.bits); j++ {
+		b, err := d.DecodeBit(&tc.probs[m])
+		if err != nil {
+			return 0, err
+		}
+		m = (m << 1) | b
+	}
+	return m - (1 << uint(tc.bits)), nil
+}
+
+// treeReverseCodec is another tree codec, where the least-significant bit is
+// the start of the probability tree.
+type treeReverseCodec struct {
+	probTree
+}
+
+// deepcopy initializes the treeReverseCodec as a deep copy of the
+// source.
+func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) {
+	tc.probTree.deepcopy(&src.probTree)
+}
+
+// makeTreeReverseCodec creates treeReverseCodec value. The bits argument must
+// be in the range [1,32].
+func makeTreeReverseCodec(bits int) treeReverseCodec {
+	return treeReverseCodec{makeProbTree(bits)}
+}
+
+// Encode uses range encoder to encode a fixed-bit-size value. The range
+// encoder may cause errors.
+func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) {
+	m := uint32(1)
+	for i := uint(0); i < uint(tc.bits); i++ {
+		b := (v >> i) & 1
+		if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
+			return err
+		}
+		m = (m << 1) | b
+	}
+	return nil
+}
+
+// Decodes uses the range decoder to decode a fixed-bit-size value. Errors
+// returned by the range decoder will be returned.
+func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) {
+	m := uint32(1)
+	for j := uint(0); j < uint(tc.bits); j++ {
+		b, err := d.DecodeBit(&tc.probs[m])
+		if err != nil {
+			return 0, err
+		}
+		m = (m << 1) | b
+		v |= b << j
+	}
+	return v, nil
+}
+
+// probTree stores enough probability values to be used by the treeEncode and
+// treeDecode methods of the range coder types.
+type probTree struct {
+	probs []prob
+	bits  byte
+}
+
+// deepcopy initializes the probTree value as a deep copy of the source.
+func (t *probTree) deepcopy(src *probTree) {
+	if t == src {
+		return
+	}
+	t.probs = make([]prob, len(src.probs))
+	copy(t.probs, src.probs)
+	t.bits = src.bits
+}
+
+// makeProbTree initializes a probTree structure.
+func makeProbTree(bits int) probTree {
+	if !(1 <= bits && bits <= 32) {
+		panic("bits outside of range [1,32]")
+	}
+	t := probTree{
+		bits:  byte(bits),
+		probs: make([]prob, 1<<uint(bits)),
+	}
+	for i := range t.probs {
+		t.probs[i] = probInit
+	}
+	return t
+}
+
+// Bits returns the number of bits for the probability tree.
+func (t *probTree) Bits() int {
+	return int(t.bits)
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer.go b/vendor/github.com/ulikunitz/xz/lzma/writer.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/writer.go
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"bufio"
+	"errors"
+	"io"
+)
+
+// MinDictCap and MaxDictCap provide the range of supported dictionary
+// capacities.
+const (
+	MinDictCap = 1 << 12
+	MaxDictCap = 1<<32 - 1
+)
+
+// WriterConfig defines the configuration parameter for a writer.
+type WriterConfig struct {
+	// Properties for the encoding. If the it is nil the value
+	// {LC: 3, LP: 0, PB: 2} will be chosen.
+	Properties *Properties
+	// The capacity of the dictionary. If DictCap is zero, the value
+	// 8 MiB will be chosen.
+	DictCap int
+	// Size of the lookahead buffer; value 0 indicates default size
+	// 4096
+	BufSize int
+	// Match algorithm
+	Matcher MatchAlgorithm
+	// SizeInHeader indicates that the header will contain the size.
+	SizeInHeader bool
+	// Size of the data to be encoded. A positive value will imply
+	// SizeInHeader.
+	Size int64
+	// EOSMarker requests whether the EOSMarker needs to be written.
+	EOSMarker bool
+}
+
+// fill converts zero-value fields to their explicit default values.
+func (c *WriterConfig) fill() {
+	if c.Properties == nil {
+		c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
+	}
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+	if c.BufSize == 0 {
+		c.BufSize = 4096
+	}
+	if c.Size > 0 {
+		c.SizeInHeader = true
+	}
+	if !c.SizeInHeader {
+		c.EOSMarker = true
+	}
+}
+
+// Verify checks WriterConfig for errors. 
Verify will replace zero +// values with default values. +func (c *WriterConfig) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.SizeInHeader { + if c.Size < 0 { + return errors.New("lzma: negative size not supported") + } + } else if !c.EOSMarker { + return errors.New("lzma: EOS marker is required") + } + if err = c.Matcher.verify(); err != nil { + return err + } + + return nil +} + +// header returns the header structure for this configuration. +func (c *WriterConfig) header() header { + h := header{ + properties: *c.Properties, + dictCap: c.DictCap, + size: -1, + } + if c.SizeInHeader { + h.size = c.Size + } + return h +} + +// Writer writes an LZMA stream in the classic format. +type Writer struct { + h header + bw io.ByteWriter + buf *bufio.Writer + e *encoder +} + +// NewWriter creates a new LZMA writer for the classic format. The +// method will write the header to the underlying stream. +func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{h: c.header()} + + var ok bool + w.bw, ok = lzma.(io.ByteWriter) + if !ok { + w.buf = bufio.NewWriter(lzma) + w.bw = w.buf + } + state := newState(w.h.properties) + m, err := c.Matcher.new(w.h.dictCap) + if err != nil { + return nil, err + } + dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m) + if err != nil { + return nil, err + } + var flags encoderFlags + if c.EOSMarker { + flags = eosMarker + } + if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil { + return nil, err + } + + if err = w.writeHeader(); err != nil { + return nil, err + } + return w, nil +} + +// NewWriter creates a new LZMA writer using the classic format. The +// function writes the header to the underlying stream. +func NewWriter(lzma io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(lzma) +} + +// writeHeader writes the LZMA header into the stream. +func (w *Writer) writeHeader() error { + data, err := w.h.marshalBinary() + if err != nil { + return err + } + _, err = w.bw.(io.Writer).Write(data) + return err +} + +// Write puts data into the Writer. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.h.size >= 0 { + m := w.h.size + m -= w.e.Compressed() + int64(w.e.dict.Buffered()) + if m < 0 { + m = 0 + } + if m < int64(len(p)) { + p = p[:m] + err = ErrNoSpace + } + } + var werr error + if n, werr = w.e.Write(p); werr != nil { + err = werr + } + return n, err +} + +// Close closes the writer stream. It ensures that all data from the +// buffer will be compressed and the LZMA stream will be finished. 
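
For orientation, here is a minimal round-trip sketch for the classic-format API above. It uses only constructors visible in this patch plus lzma.NewReader, which belongs to the same vendored package but is not shown in this hunk.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	var buf bytes.Buffer

	// Compress: NewWriter writes the classic LZMA header immediately.
	w, err := lzma.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := io.WriteString(w, "hello, lzma"); err != nil {
		panic(err)
	}
	// Close flushes the encoder and finishes the stream; skipping it
	// leaves buffered data unencoded.
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Decompress with the matching classic-format reader.
	r, err := lzma.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out)
}
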
+func (w *Writer) Close() error { + if w.h.size >= 0 { + n := w.e.Compressed() + int64(w.e.dict.Buffered()) + if n != w.h.size { + return errSize + } + } + err := w.e.Close() + if w.buf != nil { + ferr := w.buf.Flush() + if err == nil { + err = ferr + } + } + return err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go new file mode 100644 index 000000000..97bbafa11 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go @@ -0,0 +1,305 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "bytes" + "errors" + "io" +) + +// Writer2Config is used to create a Writer2 using parameters. +type Writer2Config struct { + // The properties for the encoding. If the it is nil the value + // {LC: 3, LP: 0, PB: 2} will be chosen. + Properties *Properties + // The capacity of the dictionary. If DictCap is zero, the value + // 8 MiB will be chosen. + DictCap int + // Size of the lookahead buffer; value 0 indicates default size + // 4096 + BufSize int + // Match algorithm + Matcher MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *Writer2Config) fill() { + if c.Properties == nil { + c.Properties = &Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } +} + +// Verify checks the Writer2Config for correctness. Zero values will be +// replaced by default values. +func (c *Writer2Config) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.Properties.LC+c.Properties.LP > 4 { + return errors.New("lzma: sum of lc and lp exceeds 4") + } + if err = c.Matcher.verify(); err != nil { + return err + } + return nil +} + +// Writer2 supports the creation of an LZMA2 stream. But note that +// written data is buffered, so call Flush or Close to write data to the +// underlying writer. The Close method writes the end-of-stream marker +// to the stream. So you may be able to concatenate the output of two +// writers as long the output of the first writer has only been flushed +// but not closed. +// +// Any change to the fields Properties, DictCap must be done before the +// first call to Write, Flush or Close. +type Writer2 struct { + w io.Writer + + start *state + encoder *encoder + + cstate chunkState + ctype chunkType + + buf bytes.Buffer + lbw LimitedByteWriter +} + +// NewWriter2 creates an LZMA2 chunk sequence writer with the default +// parameters and options. +func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + return Writer2Config{}.NewWriter2(lzma2) +} + +// NewWriter2 creates a new LZMA2 writer using the given configuration. 
+func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer2{ + w: lzma2, + start: newState(*c.Properties), + cstate: start, + ctype: start.defaultChunkType(), + } + w.buf.Grow(maxCompressed) + w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed} + m, err := c.Matcher.new(c.DictCap) + if err != nil { + return nil, err + } + d, err := newEncoderDict(c.DictCap, c.BufSize, m) + if err != nil { + return nil, err + } + w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0) + if err != nil { + return nil, err + } + return w, nil +} + +// written returns the number of bytes written to the current chunk +func (w *Writer2) written() int { + if w.encoder == nil { + return 0 + } + return int(w.encoder.Compressed()) + w.encoder.dict.Buffered() +} + +// errClosed indicates that the writer is closed. +var errClosed = errors.New("lzma: writer closed") + +// Writes data to LZMA2 stream. Note that written data will be buffered. +// Use Flush or Close to ensure that data is written to the underlying +// writer. +func (w *Writer2) Write(p []byte) (n int, err error) { + if w.cstate == stop { + return 0, errClosed + } + for n < len(p) { + m := maxUncompressed - w.written() + if m <= 0 { + panic("lzma: maxUncompressed reached") + } + var q []byte + if n+m < len(p) { + q = p[n : n+m] + } else { + q = p[n:] + } + k, err := w.encoder.Write(q) + n += k + if err != nil && err != ErrLimit { + return n, err + } + if err == ErrLimit || k == m { + if err = w.flushChunk(); err != nil { + return n, err + } + } + } + return n, nil +} + +// writeUncompressedChunk writes an uncompressed chunk to the LZMA2 +// stream. +func (w *Writer2) writeUncompressedChunk() error { + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("lzma: can't write empty uncompressed chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + switch w.ctype { + case cLRND: + w.ctype = cUD + default: + w.ctype = cU + } + w.encoder.state = w.start + + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = w.encoder.dict.CopyN(w.w, int(u)) + return err +} + +// writeCompressedChunk writes a compressed chunk to the underlying +// writer. +func (w *Writer2) writeCompressedChunk() error { + if w.ctype == cU || w.ctype == cUD { + panic("chunk type uncompressed") + } + + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("writeCompressedChunk: empty chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + c := w.buf.Len() + if c <= 0 { + panic("no compressed data") + } + if c > maxCompressed { + panic("overrun of compressed data limit") + } + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + compressed: uint16(c - 1), + props: w.encoder.state.Properties, + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = io.Copy(w.w, &w.buf) + return err +} + +// writes a single chunk to the underlying writer. +func (w *Writer2) writeChunk() error { + u := int(uncompressedHeaderLen + w.encoder.Compressed()) + c := headerLen(w.ctype) + w.buf.Len() + if u < c { + return w.writeUncompressedChunk() + } + return w.writeCompressedChunk() +} + +// flushChunk terminates the current chunk. 
The encoder will be reset +// to support the next chunk. +func (w *Writer2) flushChunk() error { + if w.written() == 0 { + return nil + } + var err error + if err = w.encoder.Close(); err != nil { + return err + } + if err = w.writeChunk(); err != nil { + return err + } + w.buf.Reset() + w.lbw.N = maxCompressed + if err = w.encoder.Reopen(&w.lbw); err != nil { + return err + } + if err = w.cstate.next(w.ctype); err != nil { + return err + } + w.ctype = w.cstate.defaultChunkType() + w.start = cloneState(w.encoder.state) + return nil +} + +// Flush writes all buffered data out to the underlying stream. This +// could result in multiple chunks to be created. +func (w *Writer2) Flush() error { + if w.cstate == stop { + return errClosed + } + for w.written() > 0 { + if err := w.flushChunk(); err != nil { + return err + } + } + return nil +} + +// Close terminates the LZMA2 stream with an EOS chunk. +func (w *Writer2) Close() error { + if w.cstate == stop { + return errClosed + } + if err := w.Flush(); err != nil { + return nil + } + // write zero byte EOS chunk + _, err := w.w.Write([]byte{0}) + if err != nil { + return err + } + w.cstate = stop + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go new file mode 100644 index 000000000..bd5f42ee8 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzmafilter.go @@ -0,0 +1,117 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "fmt" + "io" + + "github.com/ulikunitz/xz/lzma" +) + +// LZMA filter constants. +const ( + lzmaFilterID = 0x21 + lzmaFilterLen = 3 +) + +// lzmaFilter declares the LZMA2 filter information stored in an xz +// block header. +type lzmaFilter struct { + dictCap int64 +} + +// String returns a representation of the LZMA filter. +func (f lzmaFilter) String() string { + return fmt.Sprintf("LZMA dict cap %#x", f.dictCap) +} + +// id returns the ID for the LZMA2 filter. +func (f lzmaFilter) id() uint64 { return lzmaFilterID } + +// MarshalBinary converts the lzmaFilter in its encoded representation. +func (f lzmaFilter) MarshalBinary() (data []byte, err error) { + c := lzma.EncodeDictCap(f.dictCap) + return []byte{lzmaFilterID, 1, c}, nil +} + +// UnmarshalBinary unmarshals the given data representation of the LZMA2 +// filter. +func (f *lzmaFilter) UnmarshalBinary(data []byte) error { + if len(data) != lzmaFilterLen { + return errors.New("xz: data for LZMA2 filter has wrong length") + } + if data[0] != lzmaFilterID { + return errors.New("xz: wrong LZMA2 filter id") + } + if data[1] != 1 { + return errors.New("xz: wrong LZMA2 filter size") + } + dc, err := lzma.DecodeDictCap(data[2]) + if err != nil { + return errors.New("xz: wrong LZMA2 dictionary size property") + } + + f.dictCap = dc + return nil +} + +// reader creates a new reader for the LZMA2 filter. +func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader, + err error) { + + config := new(lzma.Reader2Config) + if c != nil { + config.DictCap = c.DictCap + } + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fr, err = config.NewReader2(r) + if err != nil { + return nil, err + } + return fr, nil +} + +// writeCloser creates a io.WriteCloser for the LZMA2 filter. 
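
A hedged usage sketch for the chunked LZMA2 writer above: NewWriter2, Flush, and Close come from this file, and the read side mirrors how the xz LZMA2 filter above builds its reader from a Reader2Config.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	var buf bytes.Buffer

	// Writer2 buffers chunk data; Close flushes remaining chunks and
	// appends the zero-byte end-of-stream chunk.
	w, err := lzma.NewWriter2(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := io.WriteString(w, "hello, lzma2"); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Read the chunk sequence back.
	cfg := new(lzma.Reader2Config)
	r, err := cfg.NewReader2(&buf)
	if err != nil {
		panic(err)
	}
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out)
}

Because only Close writes the end-of-stream marker, the outputs of two writers that were merely flushed can be concatenated, as the Writer2 doc comment above notes.
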
+func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig, +) (fw io.WriteCloser, err error) { + config := new(lzma.Writer2Config) + if c != nil { + *config = lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + } + + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fw, err = config.NewWriter2(w) + if err != nil { + return nil, err + } + return fw, nil +} + +// last returns true, because an LZMA2 filter must be the last filter in +// the filter list. +func (f lzmaFilter) last() bool { return true } diff --git a/vendor/github.com/ulikunitz/xz/make-docs b/vendor/github.com/ulikunitz/xz/make-docs new file mode 100644 index 000000000..a8c612ce1 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/make-docs @@ -0,0 +1,5 @@ +#!/bin/sh + +set -x +pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md +pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md diff --git a/vendor/github.com/ulikunitz/xz/none-check.go b/vendor/github.com/ulikunitz/xz/none-check.go new file mode 100644 index 000000000..6a56a2612 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/none-check.go @@ -0,0 +1,23 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import "hash" + +type noneHash struct{} + +func (h noneHash) Write(p []byte) (n int, err error) { return len(p), nil } + +func (h noneHash) Sum(b []byte) []byte { return b } + +func (h noneHash) Reset() {} + +func (h noneHash) Size() int { return 0 } + +func (h noneHash) BlockSize() int { return 0 } + +func newNoneHash() hash.Hash { + return &noneHash{} +} diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go new file mode 100644 index 000000000..bde1412cf --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/reader.go @@ -0,0 +1,359 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xz supports the compression and decompression of xz files. It +// supports version 1.0.4 of the specification without the non-LZMA2 +// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt +package xz + +import ( + "bytes" + "errors" + "fmt" + "hash" + "io" + + "github.com/ulikunitz/xz/internal/xlog" + "github.com/ulikunitz/xz/lzma" +) + +// ReaderConfig defines the parameters for the xz reader. The +// SingleStream parameter requests the reader to assume that the +// underlying stream contains only a single stream. +type ReaderConfig struct { + DictCap int + SingleStream bool +} + +// Verify checks the reader parameters for Validity. Zero values will be +// replaced by default values. +func (c *ReaderConfig) Verify() error { + if c == nil { + return errors.New("xz: reader parameters are nil") + } + lc := lzma.Reader2Config{DictCap: c.DictCap} + if err := lc.Verify(); err != nil { + return err + } + return nil +} + +// Reader supports the reading of one or multiple xz streams. 
+type Reader struct { + ReaderConfig + + xz io.Reader + sr *streamReader +} + +// streamReader decodes a single xz stream +type streamReader struct { + ReaderConfig + + xz io.Reader + br *blockReader + newHash func() hash.Hash + h header + index []record +} + +// NewReader creates a new xz reader using the default parameters. +// The function reads and checks the header of the first XZ stream. The +// reader will process multiple streams including padding. +func NewReader(xz io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(xz) +} + +// NewReader creates an xz stream reader. The created reader will be +// able to process multiple streams and padding unless a SingleStream +// has been set in the reader configuration c. +func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader{ + ReaderConfig: c, + xz: xz, + } + if r.sr, err = c.newStreamReader(xz); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + return r, nil +} + +var errUnexpectedData = errors.New("xz: unexpected data after stream") + +// Read reads uncompressed data from the stream. +func (r *Reader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.sr == nil { + if r.SingleStream { + data := make([]byte, 1) + _, err = io.ReadFull(r.xz, data) + if err != io.EOF { + return n, errUnexpectedData + } + return n, io.EOF + } + for { + r.sr, err = r.ReaderConfig.newStreamReader(r.xz) + if err != errPadding { + break + } + } + if err != nil { + return n, err + } + } + k, err := r.sr.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.sr = nil + continue + } + return n, err + } + } + return n, nil +} + +var errPadding = errors.New("xz: padding (4 zero bytes) encountered") + +// newStreamReader creates a new xz stream reader using the given configuration +// parameters. NewReader reads and checks the header of the xz stream. +func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(xz, data[:4]); err != nil { + return nil, err + } + if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) { + return nil, errPadding + } + if _, err = io.ReadFull(xz, data[4:]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + r = &streamReader{ + ReaderConfig: c, + xz: xz, + index: make([]record, 0, 4), + } + if err = r.h.UnmarshalBinary(data); err != nil { + return nil, err + } + xlog.Debugf("xz header %s", r.h) + if r.newHash, err = newHashFunc(r.h.flags); err != nil { + return nil, err + } + return r, nil +} + +// readTail reads the index body and the xz footer. 
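
The reader API above handles multiple concatenated streams and stream padding by default, which makes an xzcat-style tool very small. A minimal sketch, assuming only xz.NewReader as vendored here:

package main

import (
	"io"
	"os"

	"github.com/ulikunitz/xz"
)

// Decompress one or more concatenated xz streams from stdin to stdout.
func main() {
	r, err := xz.NewReader(os.Stdin)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
}

Setting SingleStream in ReaderConfig instead makes Read report an error if any data follows the first stream, per the Read implementation above.
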
+func (r *streamReader) readTail() error { + index, n, err := readIndexBody(r.xz, len(r.index)) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + + for i, rec := range r.index { + if rec != index[i] { + return fmt.Errorf("xz: record %d is %v; want %v", + i, rec, index[i]) + } + } + + p := make([]byte, footerLen) + if _, err = io.ReadFull(r.xz, p); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + var f footer + if err = f.UnmarshalBinary(p); err != nil { + return err + } + xlog.Debugf("xz footer %s", f) + if f.flags != r.h.flags { + return errors.New("xz: footer flags incorrect") + } + if f.indexSize != int64(n)+1 { + return errors.New("xz: index size in footer wrong") + } + return nil +} + +// Read reads actual data from the xz stream. +func (r *streamReader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.br == nil { + bh, hlen, err := readBlockHeader(r.xz) + if err != nil { + if err == errIndexIndicator { + if err = r.readTail(); err != nil { + return n, err + } + return n, io.EOF + } + return n, err + } + xlog.Debugf("block %v", *bh) + r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh, + hlen, r.newHash()) + if err != nil { + return n, err + } + } + k, err := r.br.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.index = append(r.index, r.br.record()) + r.br = nil + } else { + return n, err + } + } + } + return n, nil +} + +// countingReader is a reader that counts the bytes read. +type countingReader struct { + r io.Reader + n int64 +} + +// Read reads data from the wrapped reader and adds it to the n field. +func (lr *countingReader) Read(p []byte) (n int, err error) { + n, err = lr.r.Read(p) + lr.n += int64(n) + return n, err +} + +// blockReader supports the reading of a block. +type blockReader struct { + lxz countingReader + header *blockHeader + headerLen int + n int64 + hash hash.Hash + r io.Reader +} + +// newBlockReader creates a new block reader. +func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, + hlen int, hash hash.Hash) (br *blockReader, err error) { + + br = &blockReader{ + lxz: countingReader{r: xz}, + header: h, + headerLen: hlen, + hash: hash, + } + + fr, err := c.newFilterReader(&br.lxz, h.filters) + if err != nil { + return nil, err + } + if br.hash.Size() != 0 { + br.r = io.TeeReader(fr, br.hash) + } else { + br.r = fr + } + + return br, nil +} + +// uncompressedSize returns the uncompressed size of the block. +func (br *blockReader) uncompressedSize() int64 { + return br.n +} + +// compressedSize returns the compressed size of the block. +func (br *blockReader) compressedSize() int64 { + return br.lxz.n +} + +// unpaddedSize computes the unpadded size for the block. +func (br *blockReader) unpaddedSize() int64 { + n := int64(br.headerLen) + n += br.compressedSize() + n += int64(br.hash.Size()) + return n +} + +// record returns the index record for the current block. +func (br *blockReader) record() record { + return record{br.unpaddedSize(), br.uncompressedSize()} +} + +// Read reads data from the block. 
+func (br *blockReader) Read(p []byte) (n int, err error) { + n, err = br.r.Read(p) + br.n += int64(n) + + u := br.header.uncompressedSize + if u >= 0 && br.uncompressedSize() > u { + return n, errors.New("xz: wrong uncompressed size for block") + } + c := br.header.compressedSize + if c >= 0 && br.compressedSize() > c { + return n, errors.New("xz: wrong compressed size for block") + } + if err != io.EOF { + return n, err + } + if br.uncompressedSize() < u || br.compressedSize() < c { + return n, io.ErrUnexpectedEOF + } + + s := br.hash.Size() + k := padLen(br.lxz.n) + q := make([]byte, k+s, k+2*s) + if _, err = io.ReadFull(br.lxz.r, q); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return n, err + } + if !allZeros(q[:k]) { + return n, errors.New("xz: non-zero block padding") + } + checkSum := q[k:] + computedSum := br.hash.Sum(checkSum[s:]) + if !bytes.Equal(checkSum, computedSum) { + return n, errors.New("xz: checksum error for block") + } + return n, io.EOF +} + +func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader, + err error) { + + if err = verifyFilters(f); err != nil { + return nil, err + } + + fr = r + for i := len(f) - 1; i >= 0; i-- { + fr, err = f[i].reader(fr, c) + if err != nil { + return nil, err + } + } + return fr, nil +} diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go new file mode 100644 index 000000000..f693e0aef --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/writer.go @@ -0,0 +1,399 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "fmt" + "hash" + "io" + + "github.com/ulikunitz/xz/lzma" +) + +// WriterConfig describe the parameters for an xz writer. +type WriterConfig struct { + Properties *lzma.Properties + DictCap int + BufSize int + BlockSize int64 + // checksum method: CRC32, CRC64 or SHA256 (default: CRC64) + CheckSum byte + // Forces NoChecksum (default: false) + NoCheckSum bool + // match algorithm + Matcher lzma.MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *WriterConfig) fill() { + if c.Properties == nil { + c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } + if c.BlockSize == 0 { + c.BlockSize = maxInt64 + } + if c.CheckSum == 0 { + c.CheckSum = CRC64 + } + if c.NoCheckSum { + c.CheckSum = None + } +} + +// Verify checks the configuration for errors. Zero values will be +// replaced by default values. +func (c *WriterConfig) Verify() error { + if c == nil { + return errors.New("xz: writer configuration is nil") + } + c.fill() + lc := lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + if err := lc.Verify(); err != nil { + return err + } + if c.BlockSize <= 0 { + return errors.New("xz: block size out of range") + } + if err := verifyFlags(c.CheckSum); err != nil { + return err + } + return nil +} + +// filters creates the filter list for the given parameters. +func (c *WriterConfig) filters() []filter { + return []filter{&lzmaFilter{int64(c.DictCap)}} +} + +// maxInt64 defines the maximum 64-bit signed integer. +const maxInt64 = 1<<63 - 1 + +// verifyFilters checks the filter list for the length and the right +// sequence of filters. 
+func verifyFilters(f []filter) error { + if len(f) == 0 { + return errors.New("xz: no filters") + } + if len(f) > 4 { + return errors.New("xz: more than four filters") + } + for _, g := range f[:len(f)-1] { + if g.last() { + return errors.New("xz: last filter is not last") + } + } + if !f[len(f)-1].last() { + return errors.New("xz: wrong last filter") + } + return nil +} + +// newFilterWriteCloser converts a filter list into a WriteCloser that +// can be used by a blockWriter. +func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) { + if err = verifyFilters(f); err != nil { + return nil, err + } + fw = nopWriteCloser(w) + for i := len(f) - 1; i >= 0; i-- { + fw, err = f[i].writeCloser(fw, c) + if err != nil { + return nil, err + } + } + return fw, nil +} + +// nopWCloser implements a WriteCloser with a Close method not doing +// anything. +type nopWCloser struct { + io.Writer +} + +// Close returns nil and doesn't do anything else. +func (c nopWCloser) Close() error { + return nil +} + +// nopWriteCloser converts the Writer into a WriteCloser with a Close +// function that does nothing beside returning nil. +func nopWriteCloser(w io.Writer) io.WriteCloser { + return nopWCloser{w} +} + +// Writer compresses data written to it. It is an io.WriteCloser. +type Writer struct { + WriterConfig + + xz io.Writer + bw *blockWriter + newHash func() hash.Hash + h header + index []record + closed bool +} + +// newBlockWriter creates a new block writer writes the header out. +func (w *Writer) newBlockWriter() error { + var err error + w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash()) + if err != nil { + return err + } + if err = w.bw.writeHeader(w.xz); err != nil { + return err + } + return nil +} + +// closeBlockWriter closes a block writer and records the sizes in the +// index. +func (w *Writer) closeBlockWriter() error { + var err error + if err = w.bw.Close(); err != nil { + return err + } + w.index = append(w.index, w.bw.record()) + return nil +} + +// NewWriter creates a new xz writer using default parameters. +func NewWriter(xz io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(xz) +} + +// NewWriter creates a new Writer using the given configuration parameters. +func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{ + WriterConfig: c, + xz: xz, + h: header{c.CheckSum}, + index: make([]record, 0, 4), + } + if w.newHash, err = newHashFunc(c.CheckSum); err != nil { + return nil, err + } + data, err := w.h.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("w.h.MarshalBinary(): error %w", err) + } + if _, err = xz.Write(data); err != nil { + return nil, err + } + if err = w.newBlockWriter(); err != nil { + return nil, err + } + return w, nil + +} + +// Write compresses the uncompressed data provided. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.closed { + return 0, errClosed + } + for { + k, err := w.bw.Write(p[n:]) + n += k + if err != errNoSpace { + return n, err + } + if err = w.closeBlockWriter(); err != nil { + return n, err + } + if err = w.newBlockWriter(); err != nil { + return n, err + } + } +} + +// Close closes the writer and adds the footer to the Writer. Close +// doesn't close the underlying writer. 
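
Putting the writer configuration together, here is a hedged sketch of a compressing filter with an explicit checksum and block size; the CheckSum constants (CRC32, CRC64, SHA256, None) are the package-level byte constants referenced by fill above. As the Close doc comment above says, Close finishes the xz stream but does not close the underlying writer.

package main

import (
	"io"
	"os"

	"github.com/ulikunitz/xz"
)

func main() {
	// SHA256 check and 1 MiB blocks; zero-value fields fall back to the
	// defaults filled in by WriterConfig.fill.
	cfg := xz.WriterConfig{
		CheckSum:  xz.SHA256,
		BlockSize: 1 << 20,
	}
	w, err := cfg.NewWriter(os.Stdout)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(w, os.Stdin); err != nil {
		panic(err)
	}
	// Close writes the index and footer.
	if err := w.Close(); err != nil {
		panic(err)
	}
}
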
+func (w *Writer) Close() error { + if w.closed { + return errClosed + } + w.closed = true + var err error + if err = w.closeBlockWriter(); err != nil { + return err + } + + f := footer{flags: w.h.flags} + if f.indexSize, err = writeIndex(w.xz, w.index); err != nil { + return err + } + data, err := f.MarshalBinary() + if err != nil { + return err + } + if _, err = w.xz.Write(data); err != nil { + return err + } + return nil +} + +// countingWriter is a writer that counts all data written to it. +type countingWriter struct { + w io.Writer + n int64 +} + +// Write writes data to the countingWriter. +func (cw *countingWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + cw.n += int64(n) + if err == nil && cw.n < 0 { + return n, errors.New("xz: counter overflow") + } + return +} + +// blockWriter is writes a single block. +type blockWriter struct { + cxz countingWriter + // mw combines io.WriteCloser w and the hash. + mw io.Writer + w io.WriteCloser + n int64 + blockSize int64 + closed bool + headerLen int + + filters []filter + hash hash.Hash +} + +// newBlockWriter creates a new block writer. +func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) { + bw = &blockWriter{ + cxz: countingWriter{w: xz}, + blockSize: c.BlockSize, + filters: c.filters(), + hash: hash, + } + bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters) + if err != nil { + return nil, err + } + if bw.hash.Size() != 0 { + bw.mw = io.MultiWriter(bw.w, bw.hash) + } else { + bw.mw = bw.w + } + return bw, nil +} + +// writeHeader writes the header. If the function is called after Close +// the commpressedSize and uncompressedSize fields will be filled. +func (bw *blockWriter) writeHeader(w io.Writer) error { + h := blockHeader{ + compressedSize: -1, + uncompressedSize: -1, + filters: bw.filters, + } + if bw.closed { + h.compressedSize = bw.compressedSize() + h.uncompressedSize = bw.uncompressedSize() + } + data, err := h.MarshalBinary() + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + bw.headerLen = len(data) + return nil +} + +// compressed size returns the amount of data written to the underlying +// stream. +func (bw *blockWriter) compressedSize() int64 { + return bw.cxz.n +} + +// uncompressedSize returns the number of data written to the +// blockWriter +func (bw *blockWriter) uncompressedSize() int64 { + return bw.n +} + +// unpaddedSize returns the sum of the header length, the uncompressed +// size of the block and the hash size. +func (bw *blockWriter) unpaddedSize() int64 { + if bw.headerLen <= 0 { + panic("xz: block header not written") + } + n := int64(bw.headerLen) + n += bw.compressedSize() + n += int64(bw.hash.Size()) + return n +} + +// record returns the record for the current stream. Call Close before +// calling this method. +func (bw *blockWriter) record() record { + return record{bw.unpaddedSize(), bw.uncompressedSize()} +} + +var errClosed = errors.New("xz: writer already closed") + +var errNoSpace = errors.New("xz: no space") + +// Write writes uncompressed data to the block writer. +func (bw *blockWriter) Write(p []byte) (n int, err error) { + if bw.closed { + return 0, errClosed + } + + t := bw.blockSize - bw.n + if int64(len(p)) > t { + err = errNoSpace + p = p[:t] + } + + var werr error + n, werr = bw.mw.Write(p) + bw.n += int64(n) + if werr != nil { + return n, werr + } + return n, err +} + +// Close closes the writer. 
+func (bw *blockWriter) Close() error { + if bw.closed { + return errClosed + } + bw.closed = true + if err := bw.w.Close(); err != nil { + return err + } + s := bw.hash.Size() + k := padLen(bw.cxz.n) + p := make([]byte, k+s) + bw.hash.Sum(p[k:k]) + if _, err := bw.cxz.w.Write(p); err != nil { + return err + } + return nil +} diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml index fbc6df790..2346df135 100644 --- a/vendor/go.uber.org/zap/.golangci.yml +++ b/vendor/go.uber.org/zap/.golangci.yml @@ -17,7 +17,7 @@ linters: - unused # Our own extras: - - gofmt + - gofumpt - nolintlint # lints nolint directives - revive diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl index 92aa65d66..4fea3027a 100644 --- a/vendor/go.uber.org/zap/.readme.tmpl +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -1,7 +1,15 @@ # :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +
+ Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ ## Installation `go get -u go.uber.org/zap` @@ -92,7 +100,7 @@ standard.
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 11b465976..6d6cd5f4d 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -3,14 +3,30 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 1.27.0 (20 Feb 2024) +Enhancements: +* [#1378][]: Add `WithLazy` method for `SugaredLogger`. +* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`. +* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`. +* [#1416][]: Add `WithPanicHook` option for testing panic logs. + +Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release. + +[#1378]: https://github.com/uber-go/zap/pull/1378 +[#1399]: https://github.com/uber-go/zap/pull/1399 +[#1406]: https://github.com/uber-go/zap/pull/1406 +[#1416]: https://github.com/uber-go/zap/pull/1416 + ## 1.26.0 (14 Sep 2023) Enhancements: +* [#1297][]: Add Dict as a Field. * [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured context. * [#1350][]: String encoding is much (~50%) faster now. -Thanks to @jquirke, @cdvr1993 for their contributions to this release. +Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release. +[#1297]: https://github.com/uber-go/zap/pull/1297 [#1319]: https://github.com/uber-go/zap/pull/1319 [#1350]: https://github.com/uber-go/zap/pull/1350 @@ -25,7 +41,7 @@ Enhancements: * [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set. * [#1281][]: Add `zap/exp/expfield` package which contains helper methods `Str` and `Strs` for constructing String-like zap.Fields. -* [#1310][]: Reduce stack size on `Any`. +* [#1310][]: Reduce stack size on `Any`. Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions to this release. @@ -352,7 +368,7 @@ to this release. [#675]: https://github.com/uber-go/zap/pull/675 [#704]: https://github.com/uber-go/zap/pull/704 -## v1.9.1 (06 Aug 2018) +## 1.9.1 (06 Aug 2018) Bugfixes: @@ -360,7 +376,7 @@ Bugfixes: [#614]: https://github.com/uber-go/zap/pull/614 -## v1.9.0 (19 Jul 2018) +## 1.9.0 (19 Jul 2018) Enhancements: * [#602][]: Reduce number of allocations when logging with reflection. @@ -373,7 +389,7 @@ Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and [#572]: https://github.com/uber-go/zap/pull/572 [#606]: https://github.com/uber-go/zap/pull/606 -## v1.8.0 (13 Apr 2018) +## 1.8.0 (13 Apr 2018) Enhancements: * [#508][]: Make log level configurable when redirecting the standard @@ -391,14 +407,14 @@ Thanks to @DiSiqueira and @djui for their contributions to this release. [#577]: https://github.com/uber-go/zap/pull/577 [#574]: https://github.com/uber-go/zap/pull/574 -## v1.7.1 (25 Sep 2017) +## 1.7.1 (25 Sep 2017) Bugfixes: * [#504][]: Store strings when using AddByteString with the map encoder. 
[#504]: https://github.com/uber-go/zap/pull/504 -## v1.7.0 (21 Sep 2017) +## 1.7.0 (21 Sep 2017) Enhancements: @@ -407,7 +423,7 @@ Enhancements: [#487]: https://github.com/uber-go/zap/pull/487 -## v1.6.0 (30 Aug 2017) +## 1.6.0 (30 Aug 2017) Enhancements: @@ -418,7 +434,7 @@ Enhancements: [#490]: https://github.com/uber-go/zap/pull/490 [#491]: https://github.com/uber-go/zap/pull/491 -## v1.5.0 (22 Jul 2017) +## 1.5.0 (22 Jul 2017) Enhancements: @@ -436,7 +452,7 @@ Thanks to @richard-tunein and @pavius for their contributions to this release. [#460]: https://github.com/uber-go/zap/pull/460 [#470]: https://github.com/uber-go/zap/pull/470 -## v1.4.1 (08 Jun 2017) +## 1.4.1 (08 Jun 2017) This release fixes two bugs. @@ -448,7 +464,7 @@ Bugfixes: [#435]: https://github.com/uber-go/zap/pull/435 [#444]: https://github.com/uber-go/zap/pull/444 -## v1.4.0 (12 May 2017) +## 1.4.0 (12 May 2017) This release adds a few small features and is fully backward-compatible. @@ -464,7 +480,7 @@ Enhancements: [#425]: https://github.com/uber-go/zap/pull/425 [#431]: https://github.com/uber-go/zap/pull/431 -## v1.3.0 (25 Apr 2017) +## 1.3.0 (25 Apr 2017) This release adds an enhancement to zap's testing helpers as well as the ability to marshal an AtomicLevel. It is fully backward-compatible. @@ -478,7 +494,7 @@ Enhancements: [#415]: https://github.com/uber-go/zap/pull/415 [#416]: https://github.com/uber-go/zap/pull/416 -## v1.2.0 (13 Apr 2017) +## 1.2.0 (13 Apr 2017) This release adds a gRPC compatibility wrapper. It is fully backward-compatible. @@ -489,7 +505,7 @@ Enhancements: [#402]: https://github.com/uber-go/zap/pull/402 -## v1.1.0 (31 Mar 2017) +## 1.1.0 (31 Mar 2017) This release fixes two bugs and adds some enhancements to zap's testing helpers. It is fully backward-compatible. @@ -510,7 +526,7 @@ Thanks to @moitias for contributing to this release. [#396]: https://github.com/uber-go/zap/pull/396 [#386]: https://github.com/uber-go/zap/pull/386 -## v1.0.0 (14 Mar 2017) +## 1.0.0 (14 Mar 2017) This is zap's first stable release. All exported APIs are now final, and no further breaking changes will be made in the 1.x release series. Anyone using a @@ -569,7 +585,7 @@ contributions to this release. [#365]: https://github.com/uber-go/zap/pull/365 [#372]: https://github.com/uber-go/zap/pull/372 -## v1.0.0-rc.3 (7 Mar 2017) +## 1.0.0-rc.3 (7 Mar 2017) This is the third release candidate for zap's stable release. There are no breaking changes. @@ -595,7 +611,7 @@ Thanks to @ansel1 and @suyash for their contributions to this release. [#353]: https://github.com/uber-go/zap/pull/353 [#311]: https://github.com/uber-go/zap/pull/311 -## v1.0.0-rc.2 (21 Feb 2017) +## 1.0.0-rc.2 (21 Feb 2017) This is the second release candidate for zap's stable release. It includes two breaking changes. @@ -641,7 +657,7 @@ Thanks to @skipor and @chapsuk for their contributions to this release. [#326]: https://github.com/uber-go/zap/pull/326 [#300]: https://github.com/uber-go/zap/pull/300 -## v1.0.0-rc.1 (14 Feb 2017) +## 1.0.0-rc.1 (14 Feb 2017) This is the first release candidate for zap's stable release. There are multiple breaking changes and improvements from the pre-release version. Most notably: @@ -661,7 +677,7 @@ breaking changes and improvements from the pre-release version. Most notably: * Sampling is more accurate, and doesn't depend on the standard library's shared timer heap. 
-## v0.1.0-beta.1 (6 Feb 2017) +## 0.1.0-beta.1 (6 Feb 2017) This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and upgrade at their leisure. Since this is the first tagged release, there are no diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE similarity index 100% rename from vendor/go.uber.org/zap/LICENSE.txt rename to vendor/go.uber.org/zap/LICENSE diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md index 9de08927b..a17035cb6 100644 --- a/vendor/go.uber.org/zap/README.md +++ b/vendor/go.uber.org/zap/README.md @@ -1,7 +1,16 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +# :zap: zap + + +
Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ ## Installation `go get -u go.uber.org/zap` @@ -66,41 +75,44 @@ Log a message and 10 fields: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 1744 ns/op | +0% | 5 allocs/op -| :zap: zap (sugared) | 2483 ns/op | +42% | 10 allocs/op -| zerolog | 918 ns/op | -47% | 1 allocs/op -| go-kit | 5590 ns/op | +221% | 57 allocs/op -| slog | 5640 ns/op | +223% | 40 allocs/op -| apex/log | 21184 ns/op | +1115% | 63 allocs/op -| logrus | 24338 ns/op | +1296% | 79 allocs/op -| log15 | 26054 ns/op | +1394% | 74 allocs/op +| :zap: zap | 656 ns/op | +0% | 5 allocs/op +| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op +| zerolog | 380 ns/op | -42% | 1 allocs/op +| go-kit | 2249 ns/op | +243% | 57 allocs/op +| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op +| slog | 2481 ns/op | +278% | 42 allocs/op +| apex/log | 9591 ns/op | +1362% | 63 allocs/op +| log15 | 11393 ns/op | +1637% | 75 allocs/op +| logrus | 11654 ns/op | +1677% | 79 allocs/op Log a message with a logger that already has 10 fields of context: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 193 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 227 ns/op | +18% | 1 allocs/op -| zerolog | 81 ns/op | -58% | 0 allocs/op -| slog | 322 ns/op | +67% | 0 allocs/op -| go-kit | 5377 ns/op | +2686% | 56 allocs/op -| apex/log | 19518 ns/op | +10013% | 53 allocs/op -| log15 | 19812 ns/op | +10165% | 70 allocs/op -| logrus | 21997 ns/op | +11297% | 68 allocs/op +| :zap: zap | 67 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op +| zerolog | 35 ns/op | -48% | 0 allocs/op +| slog | 193 ns/op | +188% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op +| go-kit | 2460 ns/op | +3572% | 56 allocs/op +| log15 | 9038 ns/op | +13390% | 70 allocs/op +| apex/log | 9068 ns/op | +13434% | 53 allocs/op +| logrus | 10521 ns/op | +15603% | 68 allocs/op Log a static string, without any context or `printf`-style templating: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 165 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 212 ns/op | +28% | 1 allocs/op -| zerolog | 95 ns/op | -42% | 0 allocs/op -| slog | 296 ns/op | +79% | 0 allocs/op -| go-kit | 415 ns/op | +152% | 9 allocs/op -| standard library | 422 ns/op | +156% | 2 allocs/op -| apex/log | 1601 ns/op | +870% | 5 allocs/op -| logrus | 3017 ns/op | +1728% | 23 allocs/op -| log15 | 3469 ns/op | +2002% | 20 allocs/op +| :zap: zap | 63 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op +| zerolog | 32 ns/op | -49% | 0 allocs/op +| standard library | 124 ns/op | +97% | 1 allocs/op +| slog | 196 ns/op | +211% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op +| go-kit | 213 ns/op | +238% | 9 allocs/op +| apex/log | 771 ns/op | +1124% | 5 allocs/op +| logrus | 1439 ns/op | +2184% | 23 allocs/op +| log15 | 2069 ns/op | +3184% | 20 allocs/op ## Development Status: Stable @@ -120,7 +132,7 @@ standard.
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go index 27fb5cd5d..0b8540c21 100644 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -42,7 +42,7 @@ func (b *Buffer) AppendByte(v byte) { b.bs = append(b.bs, v) } -// AppendBytes writes a single byte to the Buffer. +// AppendBytes writes the given slice of bytes to the Buffer. func (b *Buffer) AppendBytes(v []byte) { b.bs = append(b.bs, v...) } diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index c8dd3358a..6743930b8 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -460,6 +460,8 @@ func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { // - https://github.com/uber-go/zap/pull/1304 // - https://github.com/uber-go/zap/pull/1305 // - https://github.com/uber-go/zap/pull/1308 +// +// See https://github.com/golang/go/issues/62077 for upstream issue. type anyFieldC[T any] func(string, T) Field func (f anyFieldC[T]) Any(key string, val any) Field { diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index 6205fe48a..c4d300323 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -43,6 +43,7 @@ type Logger struct { development bool addCaller bool + onPanic zapcore.CheckWriteHook // default is WriteThenPanic onFatal zapcore.CheckWriteHook // default is WriteThenFatal name string @@ -345,27 +346,12 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Set up any required terminal behavior. switch ent.Level { case zapcore.PanicLevel: - ce = ce.After(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) case zapcore.FatalLevel: - onFatal := log.onFatal - // nil or WriteThenNoop will lead to continued execution after - // a Fatal log entry, which is unexpected. For example, - // - // f, err := os.Open(..) - // if err != nil { - // log.Fatal("cannot open", zap.Error(err)) - // } - // fmt.Println(f.Name()) - // - // The f.Name() will panic if we continue execution after the - // log.Fatal. - if onFatal == nil || onFatal == zapcore.WriteThenNoop { - onFatal = zapcore.WriteThenFatal - } - ce = ce.After(ent, onFatal) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal)) case zapcore.DPanicLevel: if log.development { - ce = ce.After(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) } } @@ -430,3 +416,20 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { return ce } + +func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook { + // A nil or WriteThenNoop hook will lead to continued execution after + // a Panic or Fatal log entry, which is unexpected. For example, + // + // f, err := os.Open(..) + // if err != nil { + // log.Fatal("cannot open", zap.Error(err)) + // } + // fmt.Println(f.Name()) + // + // The f.Name() will panic if we continue execution after the log.Fatal. 
+ if override == nil || override == zapcore.WriteThenNoop { + return defaultHook + } + return override +} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index c4f3bca3d..43d357ac9 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -132,6 +132,21 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option { }) } +// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs. +// Zap will call this hook after writing a log statement with a Panic/DPanic level. +// +// For example, the following builds a logger that will exit the current +// goroutine after writing a Panic/DPanic log message, but it will not start a panic. +// +// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit)) +// +// This is useful for testing Panic/DPanic log output. +func WithPanicHook(hook zapcore.CheckWriteHook) Option { + return optionFunc(func(log *Logger) { + log.onPanic = hook + }) +} + // OnFatal sets the action to take on fatal logs. // // Deprecated: Use [WithFatalHook] instead. diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go index 00ac5fe3a..8904cd087 100644 --- a/vendor/go.uber.org/zap/sugar.go +++ b/vendor/go.uber.org/zap/sugar.go @@ -115,6 +115,21 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} } +// WithLazy adds a variadic number of fields to the logging context lazily. +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// Similar to [With], fields added to the child don't affect the parent, +// and vice versa. Also, the keys in key-value pairs should be strings. In development, +// passing a non-string key panics, while in production it logs an error and skips the pair. +// Passing an orphaned key has the same behavior. +func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)} +} + // Level reports the minimum enabled level for this logger. // // For NopLoggers, this is [zapcore.InvalidLevel]. @@ -122,6 +137,12 @@ func (s *SugaredLogger) Level() zapcore.Level { return zapcore.LevelOf(s.base.core) } +// Log logs the provided arguments at provided level. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) { + s.log(lvl, "", args, nil) +} + // Debug logs the provided arguments at [DebugLevel]. // Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Debug(args ...interface{}) { @@ -165,6 +186,12 @@ func (s *SugaredLogger) Fatal(args ...interface{}) { s.log(FatalLevel, "", args, nil) } +// Logf formats the message according to the format specifier +// and logs it at provided level. +func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) { + s.log(lvl, template, args, nil) +} + // Debugf formats the message according to the format specifier // and logs it at [DebugLevel]. 
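
The new SugaredLogger methods and the WithPanicHook option added in this diff combine as in the following sketch; zap.NewProduction and zapcore.WriteThenGoexit are pre-existing zap APIs, and the field values are illustrative.

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// WithPanicHook replaces the default panic behavior, here exiting
	// the goroutine instead of panicking (useful in tests).
	logger, _ := zap.NewProduction(zap.WithPanicHook(zapcore.WriteThenGoexit))
	defer logger.Sync()

	sugar := logger.Sugar()

	// Level-parameterized logging via the new Log/Logf methods.
	lvl := zap.InfoLevel
	sugar.Log(lvl, "starting up")
	sugar.Logf(lvl, "listening on %s", ":8080")

	// WithLazy defers field evaluation until the logger is actually
	// used, so cheap to create even if never written to.
	lazy := sugar.WithLazy("request_id", "abc123")
	lazy.Infow("handled request", "status", 200)
}
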
func (s *SugaredLogger) Debugf(template string, args ...interface{}) { @@ -208,6 +235,12 @@ func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { s.log(FatalLevel, template, args, nil) } +// Logw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) { + s.log(lvl, msg, nil, keysAndValues) +} + // Debugw logs a message with some additional context. The variadic key-value // pairs are treated as they are in With. // @@ -255,6 +288,12 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { s.log(FatalLevel, msg, nil, keysAndValues) } +// Logln logs a message at provided level. +// Spaces are always added between arguments. +func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) { + s.logln(lvl, args, nil) +} + // Debugln logs a message at [DebugLevel]. // Spaces are always added between arguments. func (s *SugaredLogger) Debugln(args ...interface{}) { diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go index 8ca0bfaf5..cc2b4e07b 100644 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -77,7 +77,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, // If this ever becomes a performance bottleneck, we can implement // ArrayEncoder for our plain-text format. arr := getSliceEncoder() - if c.TimeKey != "" && c.EncodeTime != nil { + if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() { c.EncodeTime(ent.Time, arr) } if c.LevelKey != "" && c.EncodeLevel != nil { diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go index 5769ff3e4..044625415 100644 --- a/vendor/go.uber.org/zap/zapcore/encoder.go +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -37,6 +37,9 @@ const DefaultLineEnding = "\n" const OmitKey = "" // A LevelEncoder serializes a Level to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type LevelEncoder func(Level, PrimitiveArrayEncoder) // LowercaseLevelEncoder serializes a Level to a lowercase string. For example, @@ -90,6 +93,9 @@ func (e *LevelEncoder) UnmarshalText(text []byte) error { } // A TimeEncoder serializes a time.Time to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type TimeEncoder func(time.Time, PrimitiveArrayEncoder) // EpochTimeEncoder serializes a time.Time to a floating-point number of seconds @@ -219,6 +225,9 @@ func (e *TimeEncoder) UnmarshalJSON(data []byte) error { } // A DurationEncoder serializes a time.Duration to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) // SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. @@ -262,6 +271,9 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error { } // A CallerEncoder serializes an EntryCaller to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. 
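
The "exactly one Append* call" contract added to these encoder type comments is straightforward to honor in a custom encoder. A small sketch for a TimeEncoder, assuming the stock production config as a starting point:

package main

import (
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// unixMillisTimeEncoder makes exactly one call to an Append* method per
// invocation, as the contract above requires.
func unixMillisTimeEncoder(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
	enc.AppendInt64(t.UnixMilli())
}

func main() {
	cfg := zap.NewProductionConfig()
	cfg.EncoderConfig.EncodeTime = unixMillisTimeEncoder
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	logger.Info("custom time encoding")
}
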
type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) // FullCallerEncoder serializes a caller in /full/path/to/package/file:line @@ -292,6 +304,9 @@ func (e *CallerEncoder) UnmarshalText(text []byte) error { // A NameEncoder serializes a period-separated logger name to a primitive // type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type NameEncoder func(string, PrimitiveArrayEncoder) // FullNameEncoder serializes the logger name as-is. diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go index 95bdb0a12..308c9781e 100644 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -47,7 +47,7 @@ const ( ByteStringType // Complex128Type indicates that the field carries a complex128. Complex128Type - // Complex64Type indicates that the field carries a complex128. + // Complex64Type indicates that the field carries a complex64. Complex64Type // DurationType indicates that the field carries a time.Duration. DurationType diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index c8ab86979..9685169b2 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -372,7 +372,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final.AppendString(ent.Level.String()) } } - if final.TimeKey != "" { + if final.TimeKey != "" && !ent.Time.IsZero() { final.AddTime(final.TimeKey, ent.Time) } if ent.LoggerName != "" && final.NameKey != "" { diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index b18efb743..948a3ee63 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -4,6 +4,9 @@ // Package errgroup provides synchronization, error propagation, and Context // cancelation for groups of goroutines working on subtasks of a common task. +// +// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks +// returning errors. package errgroup import ( diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index e7d3df4bd..b0e419857 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 16dc69937..2f0fa76e4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build darwin && go1.12 +//go:build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 64d1bb4db..2b57e0f73 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,6 +13,7 @@ package unix import ( + "errors" "sync" "unsafe" ) @@ -169,25 +170,26 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) - if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + // Suppress ENOMEM errors to be compatible with the C library __xuname() implementation. + if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_HOSTNAME} n = unsafe.Sizeof(uname.Nodename) - if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_OSRELEASE} n = unsafe.Sizeof(uname.Release) - if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_VERSION} n = unsafe.Sizeof(uname.Version) - if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } @@ -205,7 +207,7 @@ func Uname(uname *Utsname) error { mib = []_C_int{CTL_HW, HW_MACHINE} n = unsafe.Sizeof(uname.Machine) - if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 0f85e29e6..5682e2628 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1849,6 +1849,105 @@ func Dup2(oldfd, newfd int) error { //sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) //sys Fsopen(fsName string, flags int) (fd int, err error) //sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) + +//sys fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) + +func fsconfigCommon(fd int, cmd uint, key string, value *byte, aux int) (err error) { + var keyp *byte + if keyp, err = BytePtrFromString(key); err != nil { + return + } + return fsconfig(fd, cmd, keyp, value, aux) +} + +// FsconfigSetFlag is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FLAG. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +func FsconfigSetFlag(fd int, key string) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FLAG, key, nil, 0) +} + +// FsconfigSetString is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_STRING. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. 
+func FsconfigSetString(fd int, key string, value string) (err error) {
+	var valuep *byte
+	if valuep, err = BytePtrFromString(value); err != nil {
+		return
+	}
+	return fsconfigCommon(fd, FSCONFIG_SET_STRING, key, valuep, 0)
+}
+
+// FsconfigSetBinary is equivalent to fsconfig(2) called
+// with cmd == FSCONFIG_SET_BINARY.
+//
+// fd is the filesystem context to act upon.
+// key the parameter key to set.
+// value is the parameter value to set.
+func FsconfigSetBinary(fd int, key string, value []byte) (err error) {
+	if len(value) == 0 {
+		return EINVAL
+	}
+	return fsconfigCommon(fd, FSCONFIG_SET_BINARY, key, &value[0], len(value))
+}
+
+// FsconfigSetPath is equivalent to fsconfig(2) called
+// with cmd == FSCONFIG_SET_PATH.
+//
+// fd is the filesystem context to act upon.
+// key the parameter key to set.
+// path is a non-empty path for specified key.
+// atfd is a file descriptor at which to start lookup from or AT_FDCWD.
+func FsconfigSetPath(fd int, key string, path string, atfd int) (err error) {
+	var valuep *byte
+	if valuep, err = BytePtrFromString(path); err != nil {
+		return
+	}
+	return fsconfigCommon(fd, FSCONFIG_SET_PATH, key, valuep, atfd)
+}
+
+// FsconfigSetPathEmpty is equivalent to fsconfig(2) called
+// with cmd == FSCONFIG_SET_PATH_EMPTY. The same as
+// FsconfigSetPath but with AT_PATH_EMPTY implied.
+func FsconfigSetPathEmpty(fd int, key string, path string, atfd int) (err error) {
+	var valuep *byte
+	if valuep, err = BytePtrFromString(path); err != nil {
+		return
+	}
+	return fsconfigCommon(fd, FSCONFIG_SET_PATH_EMPTY, key, valuep, atfd)
+}
+
+// FsconfigSetFd is equivalent to fsconfig(2) called
+// with cmd == FSCONFIG_SET_FD.
+//
+// fd is the filesystem context to act upon.
+// key the parameter key to set.
+// value is a file descriptor to be assigned to specified key.
+func FsconfigSetFd(fd int, key string, value int) (err error) {
+	return fsconfigCommon(fd, FSCONFIG_SET_FD, key, nil, value)
+}
+
+// FsconfigCreate is equivalent to fsconfig(2) called
+// with cmd == FSCONFIG_CMD_CREATE.
+//
+// fd is the filesystem context to act upon.
+func FsconfigCreate(fd int) (err error) {
+	return fsconfig(fd, FSCONFIG_CMD_CREATE, nil, nil, 0)
+}
+
+// FsconfigReconfigure is equivalent to fsconfig(2) called
+// with cmd == FSCONFIG_CMD_RECONFIGURE.
+//
+// fd is the filesystem context to act upon.
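Taken together, these wrappers cover the configuration half of the new Linux mount API. A hedged sketch of how they might be combined with the existing Fsopen and Fsmount wrappers; the filesystem type and "size" parameter are illustrative:

//go:build linux

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Open a filesystem context for tmpfs.
	fsfd, err := unix.Fsopen("tmpfs", unix.FSOPEN_CLOEXEC)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fsfd)

	// Configure it with the fsconfig(2) wrappers added above.
	if err := unix.FsconfigSetString(fsfd, "size", "16m"); err != nil {
		log.Fatal(err)
	}
	if err := unix.FsconfigCreate(fsfd); err != nil {
		log.Fatal(err)
	}

	// Fsmount yields a detached mount fd; attaching it to a path
	// would take an additional move_mount(2) call.
	mfd, err := unix.Fsmount(fsfd, unix.FSMOUNT_CLOEXEC, 0)
	if err != nil {
		log.Fatal(err)
	}
	unix.Close(mfd)
}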
+func FsconfigReconfigure(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, nil, nil, 0) +} + //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1488d2712..87d8612a1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -906,6 +906,16 @@ func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) { + _, _, e1 := Syscall6(SYS_FSCONFIG, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(value)), uintptr(aux), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index dc0c955ee..eff6bcdef 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -836,6 +836,15 @@ const ( FSPICK_EMPTY_PATH = 0x8 FSMOUNT_CLOEXEC = 0x1 + + FSCONFIG_SET_FLAG = 0x0 + FSCONFIG_SET_STRING = 0x1 + FSCONFIG_SET_BINARY = 0x2 + FSCONFIG_SET_PATH = 0x3 + FSCONFIG_SET_PATH_EMPTY = 0x4 + FSCONFIG_SET_FD = 0x5 + FSCONFIG_CMD_CREATE = 0x6 + FSCONFIG_CMD_RECONFIGURE = 0x7 ) type OpenHow struct { @@ -1550,6 +1559,7 @@ const ( IFLA_DEVLINK_PORT = 0x3e IFLA_GSO_IPV4_MAX_SIZE = 0x3f IFLA_GRO_IPV4_MAX_SIZE = 0x40 + IFLA_DPLL_PIN = 0x41 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1565,6 +1575,7 @@ const ( IFLA_INET6_ICMP6STATS = 0x6 IFLA_INET6_TOKEN = 0x7 IFLA_INET6_ADDR_GEN_MODE = 0x8 + IFLA_INET6_RA_MTU = 0x9 IFLA_BR_UNSPEC = 0x0 IFLA_BR_FORWARD_DELAY = 0x1 IFLA_BR_HELLO_TIME = 0x2 @@ -1612,6 +1623,9 @@ const ( IFLA_BR_MCAST_MLD_VERSION = 0x2c IFLA_BR_VLAN_STATS_PER_PORT = 0x2d IFLA_BR_MULTI_BOOLOPT = 0x2e + IFLA_BR_MCAST_QUERIER_STATE = 0x2f + IFLA_BR_FDB_N_LEARNED = 0x30 + IFLA_BR_FDB_MAX_LEARNED = 0x31 IFLA_BRPORT_UNSPEC = 0x0 IFLA_BRPORT_STATE = 0x1 IFLA_BRPORT_PRIORITY = 0x2 @@ -1649,6 +1663,14 @@ const ( IFLA_BRPORT_BACKUP_PORT = 0x22 IFLA_BRPORT_MRP_RING_OPEN = 0x23 IFLA_BRPORT_MRP_IN_OPEN = 0x24 + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 0x25 + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 0x26 + IFLA_BRPORT_LOCKED = 0x27 + IFLA_BRPORT_MAB = 0x28 + IFLA_BRPORT_MCAST_N_GROUPS = 0x29 + IFLA_BRPORT_MCAST_MAX_GROUPS = 0x2a + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 0x2b + IFLA_BRPORT_BACKUP_NHID = 0x2c IFLA_INFO_UNSPEC = 0x0 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 @@ -1670,6 +1692,9 @@ const ( IFLA_MACVLAN_MACADDR = 0x4 IFLA_MACVLAN_MACADDR_DATA = 0x5 IFLA_MACVLAN_MACADDR_COUNT = 0x6 + IFLA_MACVLAN_BC_QUEUE_LEN = 0x7 + IFLA_MACVLAN_BC_QUEUE_LEN_USED = 0x8 + IFLA_MACVLAN_BC_CUTOFF = 0x9 IFLA_VRF_UNSPEC = 0x0 IFLA_VRF_TABLE = 0x1 IFLA_VRF_PORT_UNSPEC = 0x0 @@ -1693,9 +1718,22 @@ const ( IFLA_XFRM_UNSPEC = 0x0 IFLA_XFRM_LINK = 0x1 IFLA_XFRM_IF_ID = 0x2 + IFLA_XFRM_COLLECT_METADATA = 0x3 IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 + IFLA_NETKIT_UNSPEC = 0x0 + IFLA_NETKIT_PEER_INFO 
= 0x1 + IFLA_NETKIT_PRIMARY = 0x2 + IFLA_NETKIT_POLICY = 0x3 + IFLA_NETKIT_PEER_POLICY = 0x4 + IFLA_NETKIT_MODE = 0x5 IFLA_VXLAN_UNSPEC = 0x0 IFLA_VXLAN_ID = 0x1 IFLA_VXLAN_GROUP = 0x2 @@ -1726,6 +1764,8 @@ const ( IFLA_VXLAN_GPE = 0x1b IFLA_VXLAN_TTL_INHERIT = 0x1c IFLA_VXLAN_DF = 0x1d + IFLA_VXLAN_VNIFILTER = 0x1e + IFLA_VXLAN_LOCALBYPASS = 0x1f IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1740,6 +1780,7 @@ const ( IFLA_GENEVE_LABEL = 0xb IFLA_GENEVE_TTL_INHERIT = 0xc IFLA_GENEVE_DF = 0xd + IFLA_GENEVE_INNER_PROTO_INHERIT = 0xe IFLA_BAREUDP_UNSPEC = 0x0 IFLA_BAREUDP_PORT = 0x1 IFLA_BAREUDP_ETHERTYPE = 0x2 @@ -1752,6 +1793,8 @@ const ( IFLA_GTP_FD1 = 0x2 IFLA_GTP_PDP_HASHSIZE = 0x3 IFLA_GTP_ROLE = 0x4 + IFLA_GTP_CREATE_SOCKETS = 0x5 + IFLA_GTP_RESTART_COUNT = 0x6 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1781,6 +1824,9 @@ const ( IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a IFLA_BOND_TLB_DYNAMIC_LB = 0x1b IFLA_BOND_PEER_NOTIF_DELAY = 0x1c + IFLA_BOND_AD_LACP_ACTIVE = 0x1d + IFLA_BOND_MISSED_MAX = 0x1e + IFLA_BOND_NS_IP6_TARGET = 0x1f IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1796,6 +1842,7 @@ const ( IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 + IFLA_BOND_SLAVE_PRIO = 0x9 IFLA_VF_INFO_UNSPEC = 0x0 IFLA_VF_INFO = 0x1 IFLA_VF_UNSPEC = 0x0 @@ -1854,8 +1901,16 @@ const ( IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 IFLA_STATS_AF_SPEC = 0x5 + IFLA_STATS_GETSET_UNSPEC = 0x0 + IFLA_STATS_GET_FILTERS = 0x1 + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0x2 IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 0x2 + IFLA_OFFLOAD_XSTATS_L3_STATS = 0x3 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 0x2 IFLA_XDP_UNSPEC = 0x0 IFLA_XDP_FD = 0x1 IFLA_XDP_ATTACHED = 0x2 @@ -1885,6 +1940,11 @@ const ( IFLA_RMNET_UNSPEC = 0x0 IFLA_RMNET_MUX_ID = 0x1 IFLA_RMNET_FLAGS = 0x2 + IFLA_MCTP_UNSPEC = 0x0 + IFLA_MCTP_NET = 0x1 + IFLA_DSA_UNSPEC = 0x0 + IFLA_DSA_CONDUIT = 0x1 + IFLA_DSA_MASTER = 0x1 ) const ( diff --git a/vendor/golang.org/x/tools/cover/profile.go b/vendor/golang.org/x/tools/cover/profile.go new file mode 100644 index 000000000..47a9a5411 --- /dev/null +++ b/vendor/golang.org/x/tools/cover/profile.go @@ -0,0 +1,266 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cover provides support for parsing coverage profiles +// generated by "go test -coverprofile=cover.out". +package cover // import "golang.org/x/tools/cover" + +import ( + "bufio" + "errors" + "fmt" + "io" + "math" + "os" + "sort" + "strconv" + "strings" +) + +// Profile represents the profiling data for a specific file. +type Profile struct { + FileName string + Mode string + Blocks []ProfileBlock +} + +// ProfileBlock represents a single block of profiling data. 
+type ProfileBlock struct { + StartLine, StartCol int + EndLine, EndCol int + NumStmt, Count int +} + +type byFileName []*Profile + +func (p byFileName) Len() int { return len(p) } +func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName } +func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ParseProfiles parses profile data in the specified file and returns a +// Profile for each source file described therein. +func ParseProfiles(fileName string) ([]*Profile, error) { + pf, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer pf.Close() + return ParseProfilesFromReader(pf) +} + +// ParseProfilesFromReader parses profile data from the Reader and +// returns a Profile for each source file described therein. +func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) { + // First line is "mode: foo", where foo is "set", "count", or "atomic". + // Rest of file is in the format + // encoding/base64/base64.go:34.44,37.40 3 1 + // where the fields are: name.go:line.column,line.column numberOfStatements count + files := make(map[string]*Profile) + s := bufio.NewScanner(rd) + mode := "" + for s.Scan() { + line := s.Text() + if mode == "" { + const p = "mode: " + if !strings.HasPrefix(line, p) || line == p { + return nil, fmt.Errorf("bad mode line: %v", line) + } + mode = line[len(p):] + continue + } + fn, b, err := parseLine(line) + if err != nil { + return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, err) + } + p := files[fn] + if p == nil { + p = &Profile{ + FileName: fn, + Mode: mode, + } + files[fn] = p + } + p.Blocks = append(p.Blocks, b) + } + if err := s.Err(); err != nil { + return nil, err + } + for _, p := range files { + sort.Sort(blocksByStart(p.Blocks)) + // Merge samples from the same location. + j := 1 + for i := 1; i < len(p.Blocks); i++ { + b := p.Blocks[i] + last := p.Blocks[j-1] + if b.StartLine == last.StartLine && + b.StartCol == last.StartCol && + b.EndLine == last.EndLine && + b.EndCol == last.EndCol { + if b.NumStmt != last.NumStmt { + return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt) + } + if mode == "set" { + p.Blocks[j-1].Count |= b.Count + } else { + p.Blocks[j-1].Count += b.Count + } + continue + } + p.Blocks[j] = b + j++ + } + p.Blocks = p.Blocks[:j] + } + // Generate a sorted slice. + profiles := make([]*Profile, 0, len(files)) + for _, profile := range files { + profiles = append(profiles, profile) + } + sort.Sort(byFileName(profiles)) + return profiles, nil +} + +// parseLine parses a line from a coverage file. 
+// It is equivalent to the regex
+// ^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$
+//
+// However, it is much faster: https://golang.org/cl/179377
+func parseLine(l string) (fileName string, block ProfileBlock, err error) {
+	end := len(l)
+
+	b := ProfileBlock{}
+	b.Count, end, err = seekBack(l, ' ', end, "Count")
+	if err != nil {
+		return "", b, err
+	}
+	b.NumStmt, end, err = seekBack(l, ' ', end, "NumStmt")
+	if err != nil {
+		return "", b, err
+	}
+	b.EndCol, end, err = seekBack(l, '.', end, "EndCol")
+	if err != nil {
+		return "", b, err
+	}
+	b.EndLine, end, err = seekBack(l, ',', end, "EndLine")
+	if err != nil {
+		return "", b, err
+	}
+	b.StartCol, end, err = seekBack(l, '.', end, "StartCol")
+	if err != nil {
+		return "", b, err
+	}
+	b.StartLine, end, err = seekBack(l, ':', end, "StartLine")
+	if err != nil {
+		return "", b, err
+	}
+	fn := l[0:end]
+	if fn == "" {
+		return "", b, errors.New("a FileName cannot be blank")
+	}
+	return fn, b, nil
+}
+
+// seekBack searches backwards from end to find sep in l, then returns the
+// value between sep and end as an integer.
+// If seekBack fails, the returned error will reference what.
+func seekBack(l string, sep byte, end int, what string) (value int, nextSep int, err error) {
+	// Since we're seeking backwards and we know only ASCII is legal for these values,
+	// we can ignore the possibility of non-ASCII characters.
+	for start := end - 1; start >= 0; start-- {
+		if l[start] == sep {
+			i, err := strconv.Atoi(l[start+1 : end])
+			if err != nil {
+				return 0, 0, fmt.Errorf("couldn't parse %q: %v", what, err)
+			}
+			if i < 0 {
+				return 0, 0, fmt.Errorf("negative values are not allowed for %s, found %d", what, i)
+			}
+			return i, start, nil
+		}
+	}
+	return 0, 0, fmt.Errorf("couldn't find a %s before %s", string(sep), what)
+}
+
+type blocksByStart []ProfileBlock
+
+func (b blocksByStart) Len() int      { return len(b) }
+func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b blocksByStart) Less(i, j int) bool {
+	bi, bj := b[i], b[j]
+	return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol
+}
+
+// Boundary represents the position in a source file of the beginning or end of a
+// block as reported by the coverage profile. In HTML mode, it will correspond to
+// the opening or closing of a <span> tag and will be used to colorize the source
+type Boundary struct {
+	Offset int     // Location as a byte offset in the source file.
+	Start  bool    // Is this the start of a block?
+	Count  int     // Event count from the cover profile.
+	Norm   float64 // Count normalized to [0..1].
+	Index  int     // Order in input file.
+}
+
+// Boundaries returns a Profile as a set of Boundary objects within the provided src.
+func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) {
+	// Find maximum count.
+	max := 0
+	for _, b := range p.Blocks {
+		if b.Count > max {
+			max = b.Count
+		}
+	}
+	// Divisor for normalization.
+	divisor := math.Log(float64(max))
+
+	// boundary returns a Boundary, populating the Norm field with a normalized Count.
+	index := 0
+	boundary := func(offset int, start bool, count int) Boundary {
+		b := Boundary{Offset: offset, Start: start, Count: count, Index: index}
+		index++
+		if !start || count == 0 {
+			return b
+		}
+		if max <= 1 {
+			b.Norm = 0.8 // Profile is in "set" mode; we want a heat map. Use cov8 in the CSS.
+		} else if count > 0 {
+			b.Norm = math.Log(float64(count)) / divisor
+		}
+		return b
+	}
+
+	line, col := 1, 2 // TODO: Why is this 2?
+ for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); { + b := p.Blocks[bi] + if b.StartLine == line && b.StartCol == col { + boundaries = append(boundaries, boundary(si, true, b.Count)) + } + if b.EndLine == line && b.EndCol == col || line > b.EndLine { + boundaries = append(boundaries, boundary(si, false, 0)) + bi++ + continue // Don't advance through src; maybe the next block starts here. + } + if src[si] == '\n' { + line++ + col = 0 + } + col++ + si++ + } + sort.Sort(boundariesByPos(boundaries)) + return +} + +type boundariesByPos []Boundary + +func (b boundariesByPos) Len() int { return len(b) } +func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b boundariesByPos) Less(i, j int) bool { + if b[i].Offset == b[j].Offset { + // Boundaries at the same offset should be ordered according to + // their original position. + return b[i].Index < b[j].Index + } + return b[i].Offset < b[j].Offset +} diff --git a/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go new file mode 100644 index 000000000..2ef36bbcf --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go @@ -0,0 +1,160 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protodelim marshals and unmarshals varint size-delimited messages. +package protodelim + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" +) + +// MarshalOptions is a configurable varint size-delimited marshaler. +type MarshalOptions struct{ proto.MarshalOptions } + +// MarshalTo writes a varint size-delimited wire-format message to w. +// If w returns an error, MarshalTo returns it unchanged. +func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) { + msgBytes, err := o.MarshalOptions.Marshal(m) + if err != nil { + return 0, err + } + + sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes))) + sizeWritten, err := w.Write(sizeBytes) + if err != nil { + return sizeWritten, err + } + msgWritten, err := w.Write(msgBytes) + if err != nil { + return sizeWritten + msgWritten, err + } + return sizeWritten + msgWritten, nil +} + +// MarshalTo writes a varint size-delimited wire-format message to w +// with the default options. +// +// See the documentation for [MarshalOptions.MarshalTo]. +func MarshalTo(w io.Writer, m proto.Message) (int, error) { + return MarshalOptions{}.MarshalTo(w, m) +} + +// UnmarshalOptions is a configurable varint size-delimited unmarshaler. +type UnmarshalOptions struct { + proto.UnmarshalOptions + + // MaxSize is the maximum size in wire-format bytes of a single message. + // Unmarshaling a message larger than MaxSize will return an error. + // A zero MaxSize will default to 4 MiB. + // Setting MaxSize to -1 disables the limit. + MaxSize int64 +} + +const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size + +// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size +// that is larger than its configured [UnmarshalOptions.MaxSize]. +type SizeTooLargeError struct { + // Size is the varint size of the message encountered + // that was larger than the provided MaxSize. 
+ Size uint64 + + // MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded. + MaxSize uint64 +} + +func (e *SizeTooLargeError) Error() string { + return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize) +} + +// Reader is the interface expected by [UnmarshalFrom]. +// It is implemented by *[bufio.Reader]. +type Reader interface { + io.Reader + io.ByteReader +} + +// UnmarshalFrom parses and consumes a varint size-delimited wire-format message +// from r. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// The error is [io.EOF] error only if no bytes are read. +// If an EOF happens after reading some but not all the bytes, +// UnmarshalFrom returns a non-io.EOF error. +// In particular if r returns a non-io.EOF error, UnmarshalFrom returns it unchanged, +// and if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned. +func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error { + var sizeArr [binary.MaxVarintLen64]byte + sizeBuf := sizeArr[:0] + for i := range sizeArr { + b, err := r.ReadByte() + if err != nil { + // Immediate EOF is unexpected. + if err == io.EOF && i != 0 { + break + } + return err + } + sizeBuf = append(sizeBuf, b) + if b < 0x80 { + break + } + } + size, n := protowire.ConsumeVarint(sizeBuf) + if n < 0 { + return protowire.ParseError(n) + } + + maxSize := o.MaxSize + if maxSize == 0 { + maxSize = defaultMaxSize + } + if maxSize != -1 && size > uint64(maxSize) { + return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "") + } + + var b []byte + var err error + if br, ok := r.(*bufio.Reader); ok { + // Use the []byte from the bufio.Reader instead of having to allocate one. + // This reduces CPU usage and allocated bytes. + b, err = br.Peek(int(size)) + if err == nil { + defer br.Discard(int(size)) + } else { + b = nil + } + } + if b == nil { + b = make([]byte, size) + _, err = io.ReadFull(r, b) + } + + if err == io.EOF { + return io.ErrUnexpectedEOF + } + if err != nil { + return err + } + if err := o.Unmarshal(b, m); err != nil { + return err + } + return nil +} + +// UnmarshalFrom parses and consumes a varint size-delimited wire-format message +// from r with the default options. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// See the documentation for [UnmarshalOptions.UnmarshalFrom]. +func UnmarshalFrom(r Reader, m proto.Message) error { + return UnmarshalOptions{}.UnmarshalFrom(r, m) +} diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go new file mode 100644 index 000000000..14656b65a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package editiondefaults contains the binary representation of the editions +// defaults. 
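A round trip makes the new protodelim package concrete; a sketch using the well-known StringValue wrapper so it stays self-contained:

package main

import (
	"bufio"
	"bytes"
	"io"
	"log"

	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	var buf bytes.Buffer

	// Write two size-delimited messages back to back.
	for _, s := range []string{"first", "second"} {
		if _, err := protodelim.MarshalTo(&buf, wrapperspb.String(s)); err != nil {
			log.Fatal(err)
		}
	}

	// Read them back; a clean end of stream surfaces as io.EOF.
	r := bufio.NewReader(&buf)
	for {
		msg := &wrapperspb.StringValue{}
		err := protodelim.UnmarshalFrom(r, msg)
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Println(msg.GetValue())
	}
}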
+package editiondefaults + +import _ "embed" + +//go:embed editions_defaults.binpb +var Defaults []byte diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb similarity index 69% rename from vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb rename to vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 1a8610a84..18f075687 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb +++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb @@ -1,4 +1,4 @@ -  (0æ +  (0æ   (0ç   (0è æ(è \ No newline at end of file diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 193c68e8f..8826bcf40 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -68,7 +68,7 @@ type ( Extensions Extensions Services Services - EditionFeatures FileEditionFeatures + EditionFeatures EditionFeatures } FileL2 struct { Options func() protoreflect.ProtoMessage @@ -76,10 +76,13 @@ type ( Locations SourceLocations } - FileEditionFeatures struct { + EditionFeatures struct { // IsFieldPresence is true if field_presence is EXPLICIT // https://protobuf.dev/editions/features/#field_presence IsFieldPresence bool + // IsFieldPresence is true if field_presence is LEGACY_REQUIRED + // https://protobuf.dev/editions/features/#field_presence + IsLegacyRequired bool // IsOpenEnum is true if enum_type is OPEN // https://protobuf.dev/editions/features/#enum_type IsOpenEnum bool @@ -95,6 +98,9 @@ type ( // IsJSONCompliant is true if json_format is ALLOW // https://protobuf.dev/editions/features/#json_format IsJSONCompliant bool + // GenerateLegacyUnmarshalJSON determines if the plugin generates the + // UnmarshalJSON([]byte) error method for enums. + GenerateLegacyUnmarshalJSON bool } ) @@ -156,6 +162,8 @@ type ( } EnumL1 struct { eagerValues bool // controls whether EnumL2.Values is already populated + + EditionFeatures EditionFeatures } EnumL2 struct { Options func() protoreflect.ProtoMessage @@ -217,6 +225,8 @@ type ( Extensions Extensions IsMapEntry bool // promoted from google.protobuf.MessageOptions IsMessageSet bool // promoted from google.protobuf.MessageOptions + + EditionFeatures EditionFeatures } MessageL2 struct { Options func() protoreflect.ProtoMessage @@ -250,8 +260,7 @@ type ( Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor - // Edition features. 
- Presence bool + EditionFeatures EditionFeatures } Oneof struct { @@ -261,6 +270,8 @@ type ( OneofL1 struct { Options func() protoreflect.ProtoMessage Fields OneofFields // must be consistent with Message.Fields.ContainingOneof + + EditionFeatures EditionFeatures } ) @@ -310,26 +321,36 @@ func (fd *Field) Options() protoreflect.ProtoMessage { } func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number } func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality } -func (fd *Field) Kind() protoreflect.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } -func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } -func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } +func (fd *Field) Kind() protoreflect.Kind { + return fd.L1.Kind +} +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { - if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { - return fd.L1.Presence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil + if fd.L1.Cardinality == protoreflect.Repeated { + return false } - return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) + explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence + return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil } func (fd *Field) HasOptionalKeyword() bool { return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional } func (fd *Field) IsPacked() bool { - if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Repeated { - switch fd.L1.Kind { - case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: - default: - return true - } + if fd.L1.Cardinality != protoreflect.Repeated { + return false + } + switch fd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { + return fd.L1.EditionFeatures.IsPacked + } + if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 { + // proto3 repeated fields are packed by default. + return !fd.L1.HasPacked || fd.L1.IsPacked } return fd.L1.IsPacked } @@ -378,6 +399,9 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // WARNING: This method is exempt from the compatibility promise and may be // removed in the future without warning. 
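The rewritten HasPresence logic is observable through the public protoreflect API; a sketch using the well-known Struct type, whose Value fields all live in the "kind" oneof and therefore all report presence:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	md := (&structpb.Value{}).ProtoReflect().Descriptor()
	fields := md.Fields()
	for i := 0; i < fields.Len(); i++ {
		fd := fields.Get(i)
		// Oneof members always report presence, matching the
		// ContainingOneof branch in the rewritten method.
		fmt.Printf("%s: HasPresence=%v\n", fd.Name(), fd.HasPresence())
	}
}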
func (fd *Field) EnforceUTF8() bool { + if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { + return fd.L1.EditionFeatures.IsUTF8Validated + } if fd.L1.HasEnforceUTF8 { return fd.L1.EnforceUTF8 } @@ -404,10 +428,11 @@ type ( L2 *ExtensionL2 // protected by fileDesc.once } ExtensionL1 struct { - Number protoreflect.FieldNumber - Extendee protoreflect.MessageDescriptor - Cardinality protoreflect.Cardinality - Kind protoreflect.Kind + Number protoreflect.FieldNumber + Extendee protoreflect.MessageDescriptor + Cardinality protoreflect.Cardinality + Kind protoreflect.Kind + EditionFeatures EditionFeatures } ExtensionL2 struct { Options func() protoreflect.ProtoMessage diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 4a1584c9d..237e64fd2 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -5,6 +5,7 @@ package filedesc import ( + "fmt" "sync" "google.golang.org/protobuf/encoding/protowire" @@ -98,6 +99,7 @@ func (fd *File) unmarshalSeed(b []byte) { var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions, numServices int var posEnums, posMessages, posExtensions, posServices int + var options []byte b0 := b for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -113,6 +115,8 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Syntax = protoreflect.Proto2 case "proto3": fd.L1.Syntax = protoreflect.Proto3 + case "editions": + fd.L1.Syntax = protoreflect.Editions default: panic("invalid syntax") } @@ -120,6 +124,8 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Path = sb.MakeString(v) case genid.FileDescriptorProto_Package_field_number: fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) + case genid.FileDescriptorProto_Options_field_number: + options = v case genid.FileDescriptorProto_EnumType_field_number: if prevField != genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { @@ -154,6 +160,13 @@ func (fd *File) unmarshalSeed(b []byte) { numServices++ } prevField = num + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Edition_field_number: + fd.L1.Edition = Edition(v) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -166,6 +179,15 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Syntax = protoreflect.Proto2 } + if fd.L1.Syntax == protoreflect.Editions { + fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) + } + + // Parse editions features from options if any + if options != nil { + fd.unmarshalSeedOptions(options) + } + // Must allocate all declarations before parsing each descriptor type // to ensure we handled all descriptors in "flattened ordering". 
if numEnums > 0 { @@ -219,6 +241,28 @@ func (fd *File) unmarshalSeed(b []byte) { } } +func (fd *File) unmarshalSeedOptions(b []byte) { + for b := b; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileOptions_Features_field_number: + if fd.Syntax() != protoreflect.Editions { + panic(fmt.Sprintf("invalid descriptor: using edition features in a proto with syntax %s", fd.Syntax())) + } + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { ed.L0.ParentFile = pf ed.L0.Parent = pd @@ -275,6 +319,7 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i + md.L1.EditionFeatures = featuresFromParentDesc(md.Parent()) var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions int @@ -380,6 +425,13 @@ func (md *Message) unmarshalSeedOptions(b []byte) { case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.MessageOptions_Features_field_number: + md.L1.EditionFeatures = unmarshalFeatureSet(v, md.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 736a19a75..482a61cc1 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -414,6 +414,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref fd.L0.ParentFile = pf fd.L0.Parent = pd fd.L0.Index = i + fd.L1.EditionFeatures = featuresFromParentDesc(fd.Parent()) var rawTypeName []byte var rawOptions []byte @@ -465,6 +466,12 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref b = b[m:] } } + if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { + fd.L1.Kind = protoreflect.GroupKind + } + if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired { + fd.L1.Cardinality = protoreflect.Required + } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch fd.L1.Kind { @@ -497,6 +504,13 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.HasEnforceUTF8 = true fd.L1.EnforceUTF8 = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -534,6 +548,7 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { var rawTypeName []byte var rawOptions []byte + xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee) xd.L2 = new(ExtensionL2) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -565,6 +580,12 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { b = 
b[m:] } } + if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { + xd.L1.Kind = protoreflect.GroupKind + } + if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired { + xd.L1.Cardinality = protoreflect.Required + } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch xd.L1.Kind { @@ -589,6 +610,13 @@ func (xd *Extension) unmarshalOptions(b []byte) { case genid.FieldOptions_Packed_field_number: xd.L2.IsPacked = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go new file mode 100644 index 000000000..0375a49d4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -0,0 +1,142 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "fmt" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/editiondefaults" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var defaultsCache = make(map[Edition]EditionFeatures) + +func init() { + unmarshalEditionDefaults(editiondefaults.Defaults) +} + +func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) + } + } + return parent +} + +func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSet_FieldPresence_field_number: + parent.IsFieldPresence = v == genid.FeatureSet_EXPLICIT_enum_value || v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + parent.IsLegacyRequired = v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + case genid.FeatureSet_EnumType_field_number: + parent.IsOpenEnum = v == genid.FeatureSet_OPEN_enum_value + case genid.FeatureSet_RepeatedFieldEncoding_field_number: + parent.IsPacked = v == genid.FeatureSet_PACKED_enum_value + case genid.FeatureSet_Utf8Validation_field_number: + parent.IsUTF8Validated = v == genid.FeatureSet_VERIFY_enum_value + case genid.FeatureSet_MessageEncoding_field_number: + parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value + case genid.FeatureSet_JsonFormat_field_number: + parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + parent = unmarshalGoFeature(v, parent) + } + } + } + + return parent +} 
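The hand-rolled decoding loops above all follow the same protowire shape: consume a tag, then consume the value by wire type. A standalone sketch of that loop against the public protowire package (the field numbers and payload are made up):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// dumpFields walks a wire-format message the same way
// unmarshalFeatureSet does, printing instead of interpreting.
func dumpFields(b []byte) error {
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		if n < 0 {
			return protowire.ParseError(n)
		}
		b = b[n:]
		switch typ {
		case protowire.VarintType:
			v, m := protowire.ConsumeVarint(b)
			if m < 0 {
				return protowire.ParseError(m)
			}
			b = b[m:]
			fmt.Printf("field %d: varint %d\n", num, v)
		case protowire.BytesType:
			v, m := protowire.ConsumeBytes(b)
			if m < 0 {
				return protowire.ParseError(m)
			}
			b = b[m:]
			fmt.Printf("field %d: %d bytes\n", num, len(v))
		default:
			m := protowire.ConsumeFieldValue(num, typ, b)
			if m < 0 {
				return protowire.ParseError(m)
			}
			b = b[m:]
		}
	}
	return nil
}

func main() {
	// field 1 = varint 150, field 2 = bytes "hi"
	msg := protowire.AppendTag(nil, 1, protowire.VarintType)
	msg = protowire.AppendVarint(msg, 150)
	msg = protowire.AppendTag(msg, 2, protowire.BytesType)
	msg = protowire.AppendBytes(msg, []byte("hi"))
	if err := dumpFields(msg); err != nil {
		fmt.Println(err)
	}
}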
+ +func featuresFromParentDesc(parentDesc protoreflect.Descriptor) EditionFeatures { + var parentFS EditionFeatures + switch p := parentDesc.(type) { + case *File: + parentFS = p.L1.EditionFeatures + case *Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) + } + return parentFS +} + +func unmarshalEditionDefault(b []byte) { + var ed Edition + var fs EditionFeatures + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number: + ed = Edition(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number: + fs = unmarshalFeatureSet(v, fs) + } + } + } + defaultsCache[ed] = fs +} + +func unmarshalEditionDefaults(b []byte) { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.FeatureSetDefaults_Defaults_field_number: + def, m := protowire.ConsumeBytes(b) + b = b[m:] + unmarshalEditionDefault(def) + case genid.FeatureSetDefaults_MinimumEdition_field_number, + genid.FeatureSetDefaults_MaximumEdition_field_number: + // We don't care about the minimum and maximum editions. If the + // edition we are looking for later on is not in the cache we know + // it is outside of the range between minimum and maximum edition. + _, m := protowire.ConsumeVarint(b) + b = b[m:] + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num)) + } + } +} + +func getFeaturesFor(ed Edition) EditionFeatures { + if def, ok := defaultsCache[ed]; ok { + return def + } + panic(fmt.Sprintf("unsupported edition: %v", ed)) +} diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 8f94230ea..40272c893 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -18,6 +18,21 @@ const ( Edition_enum_name = "Edition" ) +// Enum values for google.protobuf.Edition. +const ( + Edition_EDITION_UNKNOWN_enum_value = 0 + Edition_EDITION_PROTO2_enum_value = 998 + Edition_EDITION_PROTO3_enum_value = 999 + Edition_EDITION_2023_enum_value = 1000 + Edition_EDITION_2024_enum_value = 1001 + Edition_EDITION_1_TEST_ONLY_enum_value = 1 + Edition_EDITION_2_TEST_ONLY_enum_value = 2 + Edition_EDITION_99997_TEST_ONLY_enum_value = 99997 + Edition_EDITION_99998_TEST_ONLY_enum_value = 99998 + Edition_EDITION_99999_TEST_ONLY_enum_value = 99999 + Edition_EDITION_MAX_enum_value = 2147483647 +) + // Names for google.protobuf.FileDescriptorSet. const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -213,6 +228,12 @@ const ( ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" ) +// Enum values for google.protobuf.ExtensionRangeOptions.VerificationState. +const ( + ExtensionRangeOptions_DECLARATION_enum_value = 0 + ExtensionRangeOptions_UNVERIFIED_enum_value = 1 +) + // Names for google.protobuf.ExtensionRangeOptions.Declaration. const ( ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" @@ -297,12 +318,41 @@ const ( FieldDescriptorProto_Type_enum_name = "Type" ) +// Enum values for google.protobuf.FieldDescriptorProto.Type. 
+const ( + FieldDescriptorProto_TYPE_DOUBLE_enum_value = 1 + FieldDescriptorProto_TYPE_FLOAT_enum_value = 2 + FieldDescriptorProto_TYPE_INT64_enum_value = 3 + FieldDescriptorProto_TYPE_UINT64_enum_value = 4 + FieldDescriptorProto_TYPE_INT32_enum_value = 5 + FieldDescriptorProto_TYPE_FIXED64_enum_value = 6 + FieldDescriptorProto_TYPE_FIXED32_enum_value = 7 + FieldDescriptorProto_TYPE_BOOL_enum_value = 8 + FieldDescriptorProto_TYPE_STRING_enum_value = 9 + FieldDescriptorProto_TYPE_GROUP_enum_value = 10 + FieldDescriptorProto_TYPE_MESSAGE_enum_value = 11 + FieldDescriptorProto_TYPE_BYTES_enum_value = 12 + FieldDescriptorProto_TYPE_UINT32_enum_value = 13 + FieldDescriptorProto_TYPE_ENUM_enum_value = 14 + FieldDescriptorProto_TYPE_SFIXED32_enum_value = 15 + FieldDescriptorProto_TYPE_SFIXED64_enum_value = 16 + FieldDescriptorProto_TYPE_SINT32_enum_value = 17 + FieldDescriptorProto_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.FieldDescriptorProto.Label. const ( FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" FieldDescriptorProto_Label_enum_name = "Label" ) +// Enum values for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_LABEL_OPTIONAL_enum_value = 1 + FieldDescriptorProto_LABEL_REPEATED_enum_value = 3 + FieldDescriptorProto_LABEL_REQUIRED_enum_value = 2 +) + // Names for google.protobuf.OneofDescriptorProto. const ( OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" @@ -474,7 +524,6 @@ const ( FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" - FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" @@ -497,7 +546,6 @@ const ( FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" - FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" @@ -523,7 +571,6 @@ const ( FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 - FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 @@ -543,6 +590,13 @@ const ( FileOptions_OptimizeMode_enum_name = 
"OptimizeMode" ) +// Enum values for google.protobuf.FileOptions.OptimizeMode. +const ( + FileOptions_SPEED_enum_value = 1 + FileOptions_CODE_SIZE_enum_value = 2 + FileOptions_LITE_RUNTIME_enum_value = 3 +) + // Names for google.protobuf.MessageOptions. const ( MessageOptions_message_name protoreflect.Name = "MessageOptions" @@ -639,24 +693,59 @@ const ( FieldOptions_CType_enum_name = "CType" ) +// Enum values for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_STRING_enum_value = 0 + FieldOptions_CORD_enum_value = 1 + FieldOptions_STRING_PIECE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.JSType. const ( FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" FieldOptions_JSType_enum_name = "JSType" ) +// Enum values for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JS_NORMAL_enum_value = 0 + FieldOptions_JS_STRING_enum_value = 1 + FieldOptions_JS_NUMBER_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionRetention. const ( FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" FieldOptions_OptionRetention_enum_name = "OptionRetention" ) +// Enum values for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_RETENTION_UNKNOWN_enum_value = 0 + FieldOptions_RETENTION_RUNTIME_enum_value = 1 + FieldOptions_RETENTION_SOURCE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionTargetType. const ( FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" FieldOptions_OptionTargetType_enum_name = "OptionTargetType" ) +// Enum values for google.protobuf.FieldOptions.OptionTargetType. +const ( + FieldOptions_TARGET_TYPE_UNKNOWN_enum_value = 0 + FieldOptions_TARGET_TYPE_FILE_enum_value = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE_enum_value = 2 + FieldOptions_TARGET_TYPE_MESSAGE_enum_value = 3 + FieldOptions_TARGET_TYPE_FIELD_enum_value = 4 + FieldOptions_TARGET_TYPE_ONEOF_enum_value = 5 + FieldOptions_TARGET_TYPE_ENUM_enum_value = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY_enum_value = 7 + FieldOptions_TARGET_TYPE_SERVICE_enum_value = 8 + FieldOptions_TARGET_TYPE_METHOD_enum_value = 9 +) + // Names for google.protobuf.FieldOptions.EditionDefault. const ( FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault" @@ -813,6 +902,13 @@ const ( MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" ) +// Enum values for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN_enum_value = 0 + MethodOptions_NO_SIDE_EFFECTS_enum_value = 1 + MethodOptions_IDEMPOTENT_enum_value = 2 +) + // Names for google.protobuf.UninterpretedOption. const ( UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" @@ -909,36 +1005,79 @@ const ( FeatureSet_FieldPresence_enum_name = "FieldPresence" ) +// Enum values for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN_enum_value = 0 + FeatureSet_EXPLICIT_enum_value = 1 + FeatureSet_IMPLICIT_enum_value = 2 + FeatureSet_LEGACY_REQUIRED_enum_value = 3 +) + // Full and short names for google.protobuf.FeatureSet.EnumType. const ( FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType" FeatureSet_EnumType_enum_name = "EnumType" ) +// Enum values for google.protobuf.FeatureSet.EnumType. 
+const ( + FeatureSet_ENUM_TYPE_UNKNOWN_enum_value = 0 + FeatureSet_OPEN_enum_value = 1 + FeatureSet_CLOSED_enum_value = 2 +) + // Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding. const ( FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding" FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding" ) +// Enum values for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_PACKED_enum_value = 1 + FeatureSet_EXPANDED_enum_value = 2 +) + // Full and short names for google.protobuf.FeatureSet.Utf8Validation. const ( FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation" FeatureSet_Utf8Validation_enum_name = "Utf8Validation" ) +// Enum values for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN_enum_value = 0 + FeatureSet_VERIFY_enum_value = 2 + FeatureSet_NONE_enum_value = 3 +) + // Full and short names for google.protobuf.FeatureSet.MessageEncoding. const ( FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding" FeatureSet_MessageEncoding_enum_name = "MessageEncoding" ) +// Enum values for google.protobuf.FeatureSet.MessageEncoding. +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_LENGTH_PREFIXED_enum_value = 1 + FeatureSet_DELIMITED_enum_value = 2 +) + // Full and short names for google.protobuf.FeatureSet.JsonFormat. const ( FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat" FeatureSet_JsonFormat_enum_name = "JsonFormat" ) +// Enum values for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JSON_FORMAT_UNKNOWN_enum_value = 0 + FeatureSet_ALLOW_enum_value = 1 + FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2 +) + // Names for google.protobuf.FeatureSetDefaults. const ( FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" @@ -1085,3 +1224,10 @@ const ( GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" ) + +// Enum values for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. +const ( + GeneratedCodeInfo_Annotation_NONE_enum_value = 0 + GeneratedCodeInfo_Annotation_SET_enum_value = 1 + GeneratedCodeInfo_Annotation_ALIAS_enum_value = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go new file mode 100644 index 000000000..fd9015e8e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_reflect_protodesc_proto_go_features_proto = "reflect/protodesc/proto/go_features.proto" + +// Names for google.protobuf.GoFeatures. +const ( + GoFeatures_message_name protoreflect.Name = "GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" +) + +// Field names for google.protobuf.GoFeatures. 
+const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" +) + +// Field numbers for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go index 1a38944b2..ad6f80c46 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -18,6 +18,11 @@ const ( NullValue_enum_name = "NullValue" ) +// Enum values for google.protobuf.NullValue. +const ( + NullValue_NULL_VALUE_enum_value = 0 +) + // Names for google.protobuf.Struct. const ( Struct_message_name protoreflect.Name = "Struct" diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go index e0f75fea0..49bc73e25 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -18,6 +18,13 @@ const ( Syntax_enum_name = "Syntax" ) +// Enum values for google.protobuf.Syntax. +const ( + Syntax_SYNTAX_PROTO2_enum_value = 0 + Syntax_SYNTAX_PROTO3_enum_value = 1 + Syntax_SYNTAX_EDITIONS_enum_value = 2 +) + // Names for google.protobuf.Type. const ( Type_message_name protoreflect.Name = "Type" @@ -105,12 +112,43 @@ const ( Field_Kind_enum_name = "Kind" ) +// Enum values for google.protobuf.Field.Kind. +const ( + Field_TYPE_UNKNOWN_enum_value = 0 + Field_TYPE_DOUBLE_enum_value = 1 + Field_TYPE_FLOAT_enum_value = 2 + Field_TYPE_INT64_enum_value = 3 + Field_TYPE_UINT64_enum_value = 4 + Field_TYPE_INT32_enum_value = 5 + Field_TYPE_FIXED64_enum_value = 6 + Field_TYPE_FIXED32_enum_value = 7 + Field_TYPE_BOOL_enum_value = 8 + Field_TYPE_STRING_enum_value = 9 + Field_TYPE_GROUP_enum_value = 10 + Field_TYPE_MESSAGE_enum_value = 11 + Field_TYPE_BYTES_enum_value = 12 + Field_TYPE_UINT32_enum_value = 13 + Field_TYPE_ENUM_enum_value = 14 + Field_TYPE_SFIXED32_enum_value = 15 + Field_TYPE_SFIXED64_enum_value = 16 + Field_TYPE_SINT32_enum_value = 17 + Field_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.Field.Cardinality. const ( Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" Field_Cardinality_enum_name = "Cardinality" ) +// Enum values for google.protobuf.Field.Cardinality. +const ( + Field_CARDINALITY_UNKNOWN_enum_value = 0 + Field_CARDINALITY_OPTIONAL_enum_value = 1 + Field_CARDINALITY_REQUIRED_enum_value = 2 + Field_CARDINALITY_REPEATED_enum_value = 3 +) + // Names for google.protobuf.Enum. 
const ( Enum_message_name protoreflect.Name = "Enum" diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index e74cefdc5..2b8f122c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -21,26 +21,18 @@ type extensionFieldInfo struct { validation validationInfo } -var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo - func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := xt.(*ExtensionInfo); ok { xi.lazyInit() return xi.info } - return legacyLoadExtensionFieldInfo(xt) -} - -// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. -func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { - if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { - return xi.(*extensionFieldInfo) - } - e := makeExtensionFieldInfo(xt.TypeDescriptor()) - if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok { - return e.(*extensionFieldInfo) - } - return e + // Ideally we'd cache the resulting *extensionFieldInfo so we don't have to + // recompute this metadata repeatedly. But without support for something like + // weak references, such a cache would pin temporary values (like dynamic + // extension types, constructed for the duration of a user request) to the + // heap forever, causing memory usage of the cache to grow unbounded. + // See discussion in https://github.com/golang/protobuf/issues/1521. + return makeExtensionFieldInfo(xt.TypeDescriptor()) } func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go index 576dcf3aa..13077751e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -197,7 +197,7 @@ func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) case fd.Kind() == protoreflect.GroupKind: return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) - case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil: + case !fd.HasPresence() && fd.ContainingOneof() == nil: // Populated oneof fields always encode even if set to the zero value, // which normally are not encoded in proto3. 
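// Editor's aside, a minimal sketch of the rule above (hypothetical message
// type, not upstream code): with implicit presence a scalar set to its zero
// value contributes no bytes to the wire, while the same value inside a
// oneof is still encoded because presence is tracked per oneof member:
//
//	m := &pb.Example{N: 0}         // implicit presence: no bytes emitted for N
//	m.Choice = &pb.Example_M{M: 0} // oneof member: tag and zero value are emitted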
switch fd.Kind() { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 5e736c60e..986322b19 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -538,6 +538,6 @@ func isZero(v reflect.Value) bool { } return true default: - panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) } } diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go index 0b74e7658..a6e7df244 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go @@ -17,7 +17,7 @@ import ( // EnforceUTF8 reports whether to enforce strict UTF-8 validation. func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { - if flags.ProtoLegacy { + if flags.ProtoLegacy || fd.Syntax() == protoreflect.Editions { if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { return fd.EnforceUTF8() } diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index d8f48faff..a50fcfb49 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,7 +51,7 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 32 + Minor = 33 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index aff6fd490..b3278163c 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -28,6 +28,7 @@ func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProt opts = proto.Clone(opts).(*descriptorpb.EnumOptions) e.L2.Options = func() protoreflect.ProtoMessage { return opts } } + e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures()) for _, s := range ed.GetReservedName() { e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) } @@ -68,6 +69,9 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { return nil, err } + if m.Base.L0.ParentFile.Syntax() == protoreflect.Editions { + m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) + } if opts := md.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.MessageOptions) m.L2.Options = func() protoreflect.ProtoMessage { return opts } @@ -114,6 +118,27 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt return ms, nil } +// canBePacked returns whether the field can use packed encoding: +// https://protobuf.dev/programming-guides/encoding/#packed +func canBePacked(fd *descriptorpb.FieldDescriptorProto) bool { + if fd.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false // not a repeated field + } + + switch protoreflect.Kind(fd.GetType()) { + case protoreflect.MessageKind, protoreflect.GroupKind: + return false // not a scalar type field + + case protoreflect.StringKind, protoreflect.BytesKind: + // 
string and bytes can explicitly not be declared as packed, + // see https://protobuf.dev/programming-guides/encoding/#packed + return false + + default: + return true + } +} + func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers for i, fd := range fds { @@ -139,12 +164,16 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc } if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions { - f.L1.Presence = resolveFeatureHasFieldPresence(f.Base.L0.ParentFile, fd) + f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) + + if f.L1.EditionFeatures.IsLegacyRequired { + f.L1.Cardinality = protoreflect.Required + } // We reuse the existing field because the old option `[packed = // true]` is mutually exclusive with the editions feature. - if fd.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + if canBePacked(fd) { f.L1.HasPacked = true - f.L1.IsPacked = resolveFeatureRepeatedFieldEncodingPacked(f.Base.L0.ParentFile, fd) + f.L1.IsPacked = f.L1.EditionFeatures.IsPacked } // We pretend this option is always explicitly set because the only @@ -155,9 +184,9 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc // requested from the descriptor). // In proto2/proto3 syntax HasEnforceUTF8 might be false. f.L1.HasEnforceUTF8 = true - f.L1.EnforceUTF8 = resolveFeatureEnforceUTF8(f.Base.L0.ParentFile, fd) + f.L1.EnforceUTF8 = f.L1.EditionFeatures.IsUTF8Validated - if f.L1.Kind == protoreflect.MessageKind && resolveFeatureDelimitedEncoding(f.Base.L0.ParentFile, fd) { + if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { f.L1.Kind = protoreflect.GroupKind } } @@ -175,6 +204,9 @@ func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDesc if opts := od.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.OneofOptions) o.L1.Options = func() protoreflect.ProtoMessage { return opts } + if parent.Syntax() == protoreflect.Editions { + o.L1.EditionFeatures = mergeEditionFeatures(parent, opts.GetFeatures()) + } } } return os, nil diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 27d7e3501..254ca5854 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -276,8 +276,8 @@ func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvab } else if err != nil { return v, ev, err } - if fd.Syntax() == protoreflect.Proto3 { - return v, ev, errors.New("cannot be specified under proto3 semantics") + if !fd.HasPresence() { + return v, ev, errors.New("cannot be specified with implicit field presence") } if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { return v, ev, errors.New("cannot be specified on composite types") diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 9af1d5648..e4dcaf876 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ 
-107,7 +107,7 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if isMessageSet && !flags.ProtoLegacy { return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) } - if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + if isMessageSet && (m.Syntax() == protoreflect.Proto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) } if m.Syntax() == protoreflect.Proto3 { @@ -314,8 +314,8 @@ func checkValidGroup(fd protoreflect.FieldDescriptor) error { switch { case fd.Kind() != protoreflect.GroupKind: return nil - case fd.Syntax() != protoreflect.Proto2: - return errors.New("invalid under proto2 semantics") + case fd.Syntax() == protoreflect.Proto3: + return errors.New("invalid under proto3 semantics") case md == nil || md.IsPlaceholder(): return errors.New("message must be resolvable") case fd.FullName().Parent() != md.FullName().Parent(): diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 7352926ca..2a6b29d17 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -5,14 +5,16 @@ package protodesc import ( - _ "embed" "fmt" "os" "sync" + "google.golang.org/protobuf/internal/editiondefaults" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" + gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" ) const ( @@ -20,14 +22,12 @@ const ( SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023 ) -//go:embed editions_defaults.binpb -var binaryEditionDefaults []byte var defaults = &descriptorpb.FeatureSetDefaults{} var defaultsCacheMu sync.Mutex var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) func init() { - err := proto.Unmarshal(binaryEditionDefaults, defaults) + err := proto.Unmarshal(editiondefaults.Defaults, defaults) if err != nil { fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err) os.Exit(1) @@ -83,95 +83,66 @@ func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { return fs } -func resolveFeatureHasFieldPresence(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { - fs := fieldDesc.GetOptions().GetFeatures() - if fs == nil || fs.FieldPresence == nil { - return fileDesc.L1.EditionFeatures.IsFieldPresence +// mergeEditionFeatures merges the parent and child feature sets. This function +// should be used when initializing Go descriptors from descriptor protos which +// is why the parent is a filedesc.EditionsFeatures (Go representation) while +// the child is a descriptorproto.FeatureSet (protoc representation). +// Any feature set by the child overwrites what is set by the parent. 
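+// For example (an illustrative reading, not normative): if the parent file
+// resolves field_presence to IMPLICIT but the child field's options carry
+// field_presence = EXPLICIT, the merged EditionFeatures reports
+// IsFieldPresence == true for that field and keeps the parent's value for
+// every feature the child leaves unset.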
+func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorpb.FeatureSet) filedesc.EditionFeatures { + var parentFS filedesc.EditionFeatures + switch p := parentDesc.(type) { + case *filedesc.File: + parentFS = p.L1.EditionFeatures + case *filedesc.Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) } - return fs.GetFieldPresence() == descriptorpb.FeatureSet_LEGACY_REQUIRED || - fs.GetFieldPresence() == descriptorpb.FeatureSet_EXPLICIT -} - -func resolveFeatureRepeatedFieldEncodingPacked(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { - fs := fieldDesc.GetOptions().GetFeatures() - if fs == nil || fs.RepeatedFieldEncoding == nil { - return fileDesc.L1.EditionFeatures.IsPacked + if child == nil { + return parentFS } - return fs.GetRepeatedFieldEncoding() == descriptorpb.FeatureSet_PACKED -} - -func resolveFeatureEnforceUTF8(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { - fs := fieldDesc.GetOptions().GetFeatures() - if fs == nil || fs.Utf8Validation == nil { - return fileDesc.L1.EditionFeatures.IsUTF8Validated + if fp := child.FieldPresence; fp != nil { + parentFS.IsFieldPresence = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED || + *fp == descriptorpb.FeatureSet_EXPLICIT + parentFS.IsLegacyRequired = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED } - return fs.GetUtf8Validation() == descriptorpb.FeatureSet_VERIFY -} - -func resolveFeatureDelimitedEncoding(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { - fs := fieldDesc.GetOptions().GetFeatures() - if fs == nil || fs.MessageEncoding == nil { - return fileDesc.L1.EditionFeatures.IsDelimitedEncoded + if et := child.EnumType; et != nil { + parentFS.IsOpenEnum = *et == descriptorpb.FeatureSet_OPEN } - return fs.GetMessageEncoding() == descriptorpb.FeatureSet_DELIMITED -} -// initFileDescFromFeatureSet initializes editions related fields in fd based -// on fs. If fs is nil it is assumed to be an empty featureset and all fields -// will be initialized with the appropriate default. fd.L1.Edition must be set -// before calling this function. 
-func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) { - dfs := getFeatureSetFor(fd.L1.Edition) - if fs == nil { - fs = &descriptorpb.FeatureSet{} + if rfe := child.RepeatedFieldEncoding; rfe != nil { + parentFS.IsPacked = *rfe == descriptorpb.FeatureSet_PACKED } - var fieldPresence descriptorpb.FeatureSet_FieldPresence - if fp := fs.FieldPresence; fp != nil { - fieldPresence = *fp - } else { - fieldPresence = *dfs.FieldPresence - } - fd.L1.EditionFeatures.IsFieldPresence = fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED || - fieldPresence == descriptorpb.FeatureSet_EXPLICIT - - var enumType descriptorpb.FeatureSet_EnumType - if et := fs.EnumType; et != nil { - enumType = *et - } else { - enumType = *dfs.EnumType + if utf8val := child.Utf8Validation; utf8val != nil { + parentFS.IsUTF8Validated = *utf8val == descriptorpb.FeatureSet_VERIFY } - fd.L1.EditionFeatures.IsOpenEnum = enumType == descriptorpb.FeatureSet_OPEN - var respeatedFieldEncoding descriptorpb.FeatureSet_RepeatedFieldEncoding - if rfe := fs.RepeatedFieldEncoding; rfe != nil { - respeatedFieldEncoding = *rfe - } else { - respeatedFieldEncoding = *dfs.RepeatedFieldEncoding + if me := child.MessageEncoding; me != nil { + parentFS.IsDelimitedEncoded = *me == descriptorpb.FeatureSet_DELIMITED } - fd.L1.EditionFeatures.IsPacked = respeatedFieldEncoding == descriptorpb.FeatureSet_PACKED - var isUTF8Validated descriptorpb.FeatureSet_Utf8Validation - if utf8val := fs.Utf8Validation; utf8val != nil { - isUTF8Validated = *utf8val - } else { - isUTF8Validated = *dfs.Utf8Validation + if jf := child.JsonFormat; jf != nil { + parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW } - fd.L1.EditionFeatures.IsUTF8Validated = isUTF8Validated == descriptorpb.FeatureSet_VERIFY - var messageEncoding descriptorpb.FeatureSet_MessageEncoding - if me := fs.MessageEncoding; me != nil { - messageEncoding = *me - } else { - messageEncoding = *dfs.MessageEncoding + if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { + if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { + parentFS.GenerateLegacyUnmarshalJSON = *luje + } } - fd.L1.EditionFeatures.IsDelimitedEncoded = messageEncoding == descriptorpb.FeatureSet_DELIMITED - var jsonFormat descriptorpb.FeatureSet_JsonFormat - if jf := fs.JsonFormat; jf != nil { - jsonFormat = *jf - } else { - jsonFormat = *dfs.JsonFormat - } - fd.L1.EditionFeatures.IsJSONCompliant = jsonFormat == descriptorpb.FeatureSet_ALLOW + return parentFS +} + +// initFileDescFromFeatureSet initializes editions related fields in fd based +// on fs. If fs is nil it is assumed to be an empty featureset and all fields +// will be initialized with the appropriate default. fd.L1.Edition must be set +// before calling this function. 
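+// Editor's sketch of the two-step merge below (assuming VERIFY is the
+// edition-2023 default for utf8_validation): for a 2023 file whose
+// FeatureSet sets only utf8_validation = NONE, the first merge applies all
+// 2023 defaults, and the second merge flips IsUTF8Validated to false while
+// leaving every other default in place.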
+func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) {
+	dfs := getFeatureSetFor(fd.L1.Edition)
+	// initialize the featureset with the defaults
+	fd.L1.EditionFeatures = mergeEditionFeatures(fd, dfs)
+	// overwrite any options explicitly specified
+	fd.L1.EditionFeatures = mergeEditionFeatures(fd, fs)
 }
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
index ec6572dfd..00b01fbd8 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
@@ -175,6 +175,8 @@ func (s Syntax) String() string {
 		return "proto2"
 	case Proto3:
 		return "proto3"
+	case Editions:
+		return "editions"
 	default:
 		return fmt.Sprintf("<unknown:%d>", s)
 	}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
index 0c045db6a..7dcc2ff09 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -160,8 +160,6 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte {
 		b = p.appendSingularField(b, "java_generic_services", nil)
 	case 18:
 		b = p.appendSingularField(b, "py_generic_services", nil)
-	case 42:
-		b = p.appendSingularField(b, "php_generic_services", nil)
 	case 23:
 		b = p.appendSingularField(b, "deprecated", nil)
 	case 31:
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 38daa858d..78624cf60 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -64,6 +64,7 @@ const (
 	// should not be depended on, but they will always be time-ordered for easy
 	// comparison.
 	Edition_EDITION_2023 Edition = 1000
+	Edition_EDITION_2024 Edition = 1001
 	// Placeholder editions for testing feature resolution. These should not be
 	// used or relied on outside of tests.
 	Edition_EDITION_1_TEST_ONLY Edition = 1
@@ -71,31 +72,39 @@
 	Edition_EDITION_99997_TEST_ONLY Edition = 99997
 	Edition_EDITION_99998_TEST_ONLY Edition = 99998
 	Edition_EDITION_99999_TEST_ONLY Edition = 99999
+	// Placeholder for specifying unbounded edition support. This should only
+	// ever be used by plugins that can expect to never require any changes to
+	// support a new edition.
+	Edition_EDITION_MAX Edition = 2147483647
 )

 // Enum value maps for Edition.
var ( Edition_name = map[int32]string{ - 0: "EDITION_UNKNOWN", - 998: "EDITION_PROTO2", - 999: "EDITION_PROTO3", - 1000: "EDITION_2023", - 1: "EDITION_1_TEST_ONLY", - 2: "EDITION_2_TEST_ONLY", - 99997: "EDITION_99997_TEST_ONLY", - 99998: "EDITION_99998_TEST_ONLY", - 99999: "EDITION_99999_TEST_ONLY", + 0: "EDITION_UNKNOWN", + 998: "EDITION_PROTO2", + 999: "EDITION_PROTO3", + 1000: "EDITION_2023", + 1001: "EDITION_2024", + 1: "EDITION_1_TEST_ONLY", + 2: "EDITION_2_TEST_ONLY", + 99997: "EDITION_99997_TEST_ONLY", + 99998: "EDITION_99998_TEST_ONLY", + 99999: "EDITION_99999_TEST_ONLY", + 2147483647: "EDITION_MAX", } Edition_value = map[string]int32{ "EDITION_UNKNOWN": 0, "EDITION_PROTO2": 998, "EDITION_PROTO3": 999, "EDITION_2023": 1000, + "EDITION_2024": 1001, "EDITION_1_TEST_ONLY": 1, "EDITION_2_TEST_ONLY": 2, "EDITION_99997_TEST_ONLY": 99997, "EDITION_99998_TEST_ONLY": 99998, "EDITION_99999_TEST_ONLY": 99999, + "EDITION_MAX": 2147483647, } ) @@ -954,21 +963,21 @@ type FeatureSet_Utf8Validation int32 const ( FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0 - FeatureSet_NONE FeatureSet_Utf8Validation = 1 FeatureSet_VERIFY FeatureSet_Utf8Validation = 2 + FeatureSet_NONE FeatureSet_Utf8Validation = 3 ) // Enum value maps for FeatureSet_Utf8Validation. var ( FeatureSet_Utf8Validation_name = map[int32]string{ 0: "UTF8_VALIDATION_UNKNOWN", - 1: "NONE", 2: "VERIFY", + 3: "NONE", } FeatureSet_Utf8Validation_value = map[string]int32{ "UTF8_VALIDATION_UNKNOWN": 0, - "NONE": 1, "VERIFY": 2, + "NONE": 3, } ) @@ -1643,12 +1652,12 @@ type FieldDescriptorProto struct { // If true, this is a proto3 "optional". When a proto3 field is optional, it // tracks presence regardless of field type. // - // When proto3_optional is true, this field must be belong to a oneof to - // signal to old proto3 clients that presence is tracked for this field. This - // oneof is known as a "synthetic" oneof, and this field must be its sole - // member (each proto3 optional field gets its own synthetic oneof). Synthetic - // oneofs exist in the descriptor only, and do not generate any API. Synthetic - // oneofs must be ordered after all "real" oneofs. + // When proto3_optional is true, this field must belong to a oneof to signal + // to old proto3 clients that presence is tracked for this field. This oneof + // is known as a "synthetic" oneof, and this field must be its sole member + // (each proto3 optional field gets its own synthetic oneof). Synthetic oneofs + // exist in the descriptor only, and do not generate any API. Synthetic oneofs + // must be ordered after all "real" oneofs. // // For message fields, proto3_optional doesn't create any semantic change, // since non-repeated message fields always track presence. However it still @@ -2195,7 +2204,6 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? 
// Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -2244,7 +2252,6 @@ const ( Default_FileOptions_CcGenericServices = bool(false) Default_FileOptions_JavaGenericServices = bool(false) Default_FileOptions_PyGenericServices = bool(false) - Default_FileOptions_PhpGenericServices = bool(false) Default_FileOptions_Deprecated = bool(false) Default_FileOptions_CcEnableArenas = bool(true) ) @@ -2352,13 +2359,6 @@ func (x *FileOptions) GetPyGenericServices() bool { return Default_FileOptions_PyGenericServices } -func (x *FileOptions) GetPhpGenericServices() bool { - if x != nil && x.PhpGenericServices != nil { - return *x.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - func (x *FileOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2472,10 +2472,6 @@ type MessageOptions struct { // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - // // Whether the message is an automatically generated map entry type for the // maps field. // @@ -2496,6 +2492,10 @@ type MessageOptions struct { // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // Enable the legacy handling of JSON field name conflicts. This lowercases // and strips underscored from the fields before comparison in proto3 only. @@ -2655,19 +2655,11 @@ type FieldOptions struct { // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - // - // As of May 2022, lazy verifies the contents of the byte stream during - // parsing. An invalid byte stream will cause the overall parsing to fail. + // Note that lazy message fields are still eagerly verified to check + // ill-formed wireformat or missing required fields. Calling IsInitialized() + // on the outer message would fail if the inner message has missing required + // fields. Failed verification would result in parsing failure (except when + // uninitialized messages are acceptable). 
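+	// For instance (editor's illustration): unmarshaling a message whose lazy
+	// sub-message omits a required field fails during the initial parse, not
+	// on first access of the lazy field.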
Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` // unverified_lazy does no correctness checks on the byte stream. This should // only be used where lazy with verification is prohibitive for performance @@ -4104,7 +4096,7 @@ type SourceCodeInfo_Location struct { // location. // // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition occurs. + // the root FileDescriptorProto to the place where the definition appears. // For example, this path: // // [ 4, 3, 2, 7, 1 ] @@ -4451,7 +4443,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xc7, 0x04, 0x0a, 0x15, 0x45, 0x78, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, @@ -4468,337 +4460,355 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x68, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, - 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, 0x01, 0x0a, 0x0b, - 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 
0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, - 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, - 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, - 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, - 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, - 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, - 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, - 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, - 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, - 0x36, 0x34, 
0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, - 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, - 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, - 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, - 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, - 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, - 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, - 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, + 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, + 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 
0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, + 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, + 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 
0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, + 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, + 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, + 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, + 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, + 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, + 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, + 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, + 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, + 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, - 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 
0x6f, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, - 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, + 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 
0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0xca, - 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, - 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, - 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, - 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, - 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 
[... regenerated raw-descriptor bytes elided: this and the following hunk rewrite long
 runs of the machine-generated table `var file_google_protobuf_descriptor_proto_rawDesc
 = []byte{...}`, the wire-encoded google.protobuf.FileDescriptorProto for
 google/protobuf/descriptor.proto in the vendored google.golang.org/protobuf module.
 The strings readable in both the removed and the added bytes name the same
 descriptor.proto messages (FileOptions, MessageOptions, FieldOptions, OneofOptions,
 EnumOptions, EnumValueOptions, ServiceOptions, MethodOptions, UninterpretedOption,
 FeatureSet, FeatureSetDefaults, SourceCodeInfo); the visible differences are
 mechanical, e.g. the new bytes drop php_generic_services from FileOptions and reserve
 its field number (42), consistent with a regenerated, not hand-edited, descriptor. ...]
@@ -4807,276 +4817,258 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
[... remainder of the regenerated raw-descriptor byte table elided ...]
0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, - 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, - 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, - 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, - 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, - 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xea, 0x01, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, - 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, - 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, - 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, - 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, - 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, - 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x39, 0x39, 0x39, 
0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, - 0x10, 0x9f, 0x8d, 0x06, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, - 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, - 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, + 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, + 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, + 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, + 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x78, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, + 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, + 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, + 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, + 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, + 0x01, 0x0a, 0x12, 0x05, 
0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, + 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, + 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, + 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, + 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, + 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, + 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, + 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x53, 0x0a, 0x0f, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, + 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, + 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, + 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, + 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0xea, 0x07, + 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, + 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, + 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, + 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, + 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, + 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, + 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, + 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, + 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 
0x0a, 0x19, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, + 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, + 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, + 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, + 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, + 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, + 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, + 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, + 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0x92, 0x02, 0x0a, 0x07, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, + 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, + 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, + 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, + 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, + 
0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, + 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, + 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, + 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go new file mode 100644 index 000000000..25de5ae00 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -0,0 +1,177 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2023 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: reflect/protodesc/proto/go_features.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +type GoFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether or not to generate the deprecated UnmarshalJSON method for enums. + LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` +} + +func (x *GoFeatures) Reset() { + *x = GoFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoFeatures) ProtoMessage() {} + +func (x *GoFeatures) ProtoReflect() protoreflect.Message { + mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead. 
+func (*GoFeatures) Descriptor() ([]byte, []int) { + return file_reflect_protodesc_proto_go_features_proto_rawDescGZIP(), []int{0} +} + +func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { + if x != nil && x.LegacyUnmarshalJsonEnum != nil { + return *x.LegacyUnmarshalJsonEnum + } + return false +} + +var file_reflect_protodesc_proto_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FeatureSet)(nil), + ExtensionType: (*GoFeatures)(nil), + Field: 1002, + Name: "google.protobuf.go", + Tag: "bytes,1002,opt,name=go", + Filename: "reflect/protodesc/proto/go_features.proto", + }, +} + +// Extension fields to descriptorpb.FeatureSet. +var ( + // optional google.protobuf.GoFeatures go = 1002; + E_Go = &file_reflect_protodesc_proto_go_features_proto_extTypes[0] +) + +var File_reflect_protodesc_proto_go_features_proto protoreflect.FileDescriptor + +var file_reflect_protodesc_proto_go_features_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x64, + 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x20, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, + 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, + 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x1f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, + 0x65, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, + 0x07, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, + 0x61, 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x49, 0x0a, 0x02, 0x67, 0x6f, + 0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x64, 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, +} + +var ( + file_reflect_protodesc_proto_go_features_proto_rawDescOnce sync.Once + file_reflect_protodesc_proto_go_features_proto_rawDescData = file_reflect_protodesc_proto_go_features_proto_rawDesc +) + +func file_reflect_protodesc_proto_go_features_proto_rawDescGZIP() []byte { + file_reflect_protodesc_proto_go_features_proto_rawDescOnce.Do(func() { + file_reflect_protodesc_proto_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflect_protodesc_proto_go_features_proto_rawDescData) + }) + return 
file_reflect_protodesc_proto_go_features_proto_rawDescData +} + +var file_reflect_protodesc_proto_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_reflect_protodesc_proto_go_features_proto_goTypes = []interface{}{ + (*GoFeatures)(nil), // 0: google.protobuf.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet +} +var file_reflect_protodesc_proto_go_features_proto_depIdxs = []int32{ + 1, // 0: google.protobuf.go:extendee -> google.protobuf.FeatureSet + 0, // 1: google.protobuf.go:type_name -> google.protobuf.GoFeatures + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_reflect_protodesc_proto_go_features_proto_init() } +func file_reflect_protodesc_proto_go_features_proto_init() { + if File_reflect_protodesc_proto_go_features_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_reflect_protodesc_proto_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GoFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_reflect_protodesc_proto_go_features_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_reflect_protodesc_proto_go_features_proto_goTypes, + DependencyIndexes: file_reflect_protodesc_proto_go_features_proto_depIdxs, + MessageInfos: file_reflect_protodesc_proto_go_features_proto_msgTypes, + ExtensionInfos: file_reflect_protodesc_proto_go_features_proto_extTypes, + }.Build() + File_reflect_protodesc_proto_go_features_proto = out.File + file_reflect_protodesc_proto_go_features_proto_rawDesc = nil + file_reflect_protodesc_proto_go_features_proto_goTypes = nil + file_reflect_protodesc_proto_go_features_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto new file mode 100644 index 000000000..d24657129 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto @@ -0,0 +1,28 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2023 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +syntax = "proto2"; + +package google.protobuf; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/protobuf/types/gofeaturespb"; + +extend google.protobuf.FeatureSet { + optional GoFeatures go = 1002; +} + +message GoFeatures { + // Whether or not to generate the deprecated UnmarshalJSON method for enums. 
+ optional bool legacy_unmarshal_json_enum = 1 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_ENUM, + edition_defaults = { edition: EDITION_PROTO2, value: "true" }, + edition_defaults = { edition: EDITION_PROTO3, value: "false" } + ]; +} diff --git a/vendor/helm.sh/helm/v3/internal/version/version.go b/vendor/helm.sh/helm/v3/internal/version/version.go index b29891ec6..414957bc9 100644 --- a/vendor/helm.sh/helm/v3/internal/version/version.go +++ b/vendor/helm.sh/helm/v3/internal/version/version.go @@ -29,7 +29,7 @@ var ( // // Increment major number for new feature additions and behavioral changes. // Increment minor number for bug fixes and performance enhancements. - version = "v3.13" + version = "v3.14" // metadata is extra build time data metadata = "" diff --git a/vendor/helm.sh/helm/v3/pkg/action/hooks.go b/vendor/helm.sh/helm/v3/pkg/action/hooks.go index 40c1ffdb6..0af625dff 100644 --- a/vendor/helm.sh/helm/v3/pkg/action/hooks.go +++ b/vendor/helm.sh/helm/v3/pkg/action/hooks.go @@ -22,6 +22,7 @@ import ( "github.com/pkg/errors" + "helm.sh/helm/v3/pkg/kube" "helm.sh/helm/v3/pkg/release" helmtime "helm.sh/helm/v3/pkg/time" ) @@ -51,7 +52,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, h.DeletePolicies = []release.HookDeletePolicy{release.HookBeforeHookCreation} } - if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation); err != nil { + if err := cfg.deleteHookByPolicy(h, release.HookBeforeHookCreation, timeout); err != nil { return err } @@ -88,7 +89,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, h.LastRun.Phase = release.HookPhaseFailed // If a hook is failed, check the annotation of the hook to determine whether the hook should be deleted // under failed condition. If so, then clear the corresponding resource object in the hook - if err := cfg.deleteHookByPolicy(h, release.HookFailed); err != nil { + if err := cfg.deleteHookByPolicy(h, release.HookFailed, timeout); err != nil { return err } return err @@ -99,7 +100,7 @@ func (cfg *Configuration) execHook(rl *release.Release, hook release.HookEvent, // If all hooks are successful, check the annotation of each hook to determine whether the hook should be deleted // under succeeded condition. If so, then clear the corresponding resource object in each hook for _, h := range executingHooks { - if err := cfg.deleteHookByPolicy(h, release.HookSucceeded); err != nil { + if err := cfg.deleteHookByPolicy(h, release.HookSucceeded, timeout); err != nil { return err } } @@ -120,7 +121,7 @@ func (x hookByWeight) Less(i, j int) bool { } // deleteHookByPolicy deletes a hook if the hook policy instructs it to -func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy) error { +func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.HookDeletePolicy, timeout time.Duration) error { // Never delete CustomResourceDefinitions; this could cause lots of // cascading garbage collection. 
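Reviewer note on the gofeaturespb files added above: the new extension hangs a Go-specific GoFeatures message off google.protobuf.FeatureSet at field 1002. A minimal consumer-side sketch, not part of the patch, showing how the extension could be set and read back with the proto runtime; the import alias is used because the generated package name may not match the directory name in this vendored copy:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
	gofeaturespb "google.golang.org/protobuf/types/gofeaturespb"
)

func main() {
	// Build a FeatureSet carrying the Go extension, as a generator might.
	fs := &descriptorpb.FeatureSet{}
	proto.SetExtension(fs, gofeaturespb.E_Go, &gofeaturespb.GoFeatures{
		LegacyUnmarshalJsonEnum: proto.Bool(true),
	})

	// Read it back the way a plugin would inspect resolved features.
	if proto.HasExtension(fs, gofeaturespb.E_Go) {
		gf := proto.GetExtension(fs, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures)
		fmt.Println(gf.GetLegacyUnmarshalJsonEnum()) // true
	}
}
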
if h.Kind == "CustomResourceDefinition" { @@ -135,6 +136,13 @@ func (cfg *Configuration) deleteHookByPolicy(h *release.Hook, policy release.Hoo if len(errs) > 0 { return errors.New(joinErrors(errs)) } + + //wait for resources until they are deleted to avoid conflicts + if kubeClient, ok := cfg.KubeClient.(kube.InterfaceExt); ok { + if err := kubeClient.WaitForDelete(resources, timeout); err != nil { + return err + } + } } return nil } diff --git a/vendor/helm.sh/helm/v3/pkg/action/lint.go b/vendor/helm.sh/helm/v3/pkg/action/lint.go index e71cfe733..ca497f2b8 100644 --- a/vendor/helm.sh/helm/v3/pkg/action/lint.go +++ b/vendor/helm.sh/helm/v3/pkg/action/lint.go @@ -36,6 +36,7 @@ type Lint struct { Namespace string WithSubcharts bool Quiet bool + KubeVersion *chartutil.KubeVersion } // LintResult is the result of Lint @@ -58,7 +59,7 @@ func (l *Lint) Run(paths []string, vals map[string]interface{}) *LintResult { } result := &LintResult{} for _, path := range paths { - linter, err := lintChart(path, vals, l.Namespace, l.Strict) + linter, err := lintChart(path, vals, l.Namespace, l.KubeVersion) if err != nil { result.Errors = append(result.Errors, err) continue @@ -85,7 +86,7 @@ func HasWarningsOrErrors(result *LintResult) bool { return len(result.Errors) > 0 } -func lintChart(path string, vals map[string]interface{}, namespace string, strict bool) (support.Linter, error) { +func lintChart(path string, vals map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion) (support.Linter, error) { var chartPath string linter := support.Linter{} @@ -124,5 +125,5 @@ func lintChart(path string, vals map[string]interface{}, namespace string, stric return linter, errors.Wrap(err, "unable to check Chart.yaml file in chart") } - return lint.All(chartPath, vals, namespace, strict), nil + return lint.AllWithKubeVersion(chartPath, vals, namespace, kubeVersion), nil } diff --git a/vendor/helm.sh/helm/v3/pkg/action/package.go b/vendor/helm.sh/helm/v3/pkg/action/package.go index 698169032..b79fcb54f 100644 --- a/vendor/helm.sh/helm/v3/pkg/action/package.go +++ b/vendor/helm.sh/helm/v3/pkg/action/package.go @@ -54,7 +54,7 @@ func NewPackage() *Package { } // Run executes 'helm package' against the given chart and returns the path to the packaged chart. 
-func (p *Package) Run(path string, vals map[string]interface{}) (string, error) { +func (p *Package) Run(path string, _ map[string]interface{}) (string, error) { ch, err := loader.LoadDir(path) if err != nil { return "", err diff --git a/vendor/helm.sh/helm/v3/pkg/action/registry_login.go b/vendor/helm.sh/helm/v3/pkg/action/registry_login.go index a55f2de58..cd144e1e7 100644 --- a/vendor/helm.sh/helm/v3/pkg/action/registry_login.go +++ b/vendor/helm.sh/helm/v3/pkg/action/registry_login.go @@ -73,7 +73,7 @@ func NewRegistryLogin(cfg *Configuration) *RegistryLogin { } // Run executes the registry login operation -func (a *RegistryLogin) Run(out io.Writer, hostname string, username string, password string, opts ...RegistryLoginOpt) error { +func (a *RegistryLogin) Run(_ io.Writer, hostname string, username string, password string, opts ...RegistryLoginOpt) error { for _, opt := range opts { if err := opt(a); err != nil { return err diff --git a/vendor/helm.sh/helm/v3/pkg/action/registry_logout.go b/vendor/helm.sh/helm/v3/pkg/action/registry_logout.go index 69add4163..7ce92defc 100644 --- a/vendor/helm.sh/helm/v3/pkg/action/registry_logout.go +++ b/vendor/helm.sh/helm/v3/pkg/action/registry_logout.go @@ -33,6 +33,6 @@ func NewRegistryLogout(cfg *Configuration) *RegistryLogout { } // Run executes the registry logout operation -func (a *RegistryLogout) Run(out io.Writer, hostname string) error { +func (a *RegistryLogout) Run(_ io.Writer, hostname string) error { return a.cfg.RegistryClient.Logout(hostname) } diff --git a/vendor/helm.sh/helm/v3/pkg/action/upgrade.go b/vendor/helm.sh/helm/v3/pkg/action/upgrade.go index c74b502e2..ffb7538a6 100644 --- a/vendor/helm.sh/helm/v3/pkg/action/upgrade.go +++ b/vendor/helm.sh/helm/v3/pkg/action/upgrade.go @@ -82,6 +82,8 @@ type Upgrade struct { ResetValues bool // ReuseValues will re-use the user's last supplied values. ReuseValues bool + // ResetThenReuseValues will reset the values to the chart's built-ins then merge with user's last supplied values. + ResetThenReuseValues bool // Recreate will (if true) recreate pods after a rollback. Recreate bool // MaxHistory limits the maximum number of revisions saved per release @@ -552,6 +554,15 @@ func (u *Upgrade) reuseValues(chart *chart.Chart, current *release.Release, newV return newVals, nil } + // If the ResetThenReuseValues flag is set, we use the new chart's values, but we copy the old config's values over the new config's values. 
+ if u.ResetThenReuseValues { + u.cfg.Log("merging values from old release to new values") + + newVals = chartutil.CoalesceTables(newVals, current.Config) + + return newVals, nil + } + if len(newVals) == 0 && len(current.Config) > 0 { u.cfg.Log("copying values from %s (v%d) to new release.", current.Name, current.Version) newVals = current.Config diff --git a/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go b/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go index 489eea93c..9bcbee60c 100644 --- a/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go +++ b/vendor/helm.sh/helm/v3/pkg/chart/loader/directory.go @@ -25,9 +25,9 @@ import ( "github.com/pkg/errors" - "helm.sh/helm/v3/internal/ignore" "helm.sh/helm/v3/internal/sympath" "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/ignore" ) var utf8bom = []byte{0xEF, 0xBB, 0xBF} diff --git a/vendor/helm.sh/helm/v3/pkg/chart/metadata.go b/vendor/helm.sh/helm/v3/pkg/chart/metadata.go index ae572abb7..a08a97cd1 100644 --- a/vendor/helm.sh/helm/v3/pkg/chart/metadata.go +++ b/vendor/helm.sh/helm/v3/pkg/chart/metadata.go @@ -16,6 +16,7 @@ limitations under the License. package chart import ( + "path/filepath" "strings" "unicode" @@ -110,6 +111,11 @@ func (md *Metadata) Validate() error { if md.Name == "" { return ValidationError("chart.metadata.name is required") } + + if md.Name != filepath.Base(md.Name) { + return ValidationErrorf("chart.metadata.name %q is invalid", md.Name) + } + if md.Version == "" { return ValidationError("chart.metadata.version is required") } @@ -128,10 +134,19 @@ func (md *Metadata) Validate() error { // Aliases need to be validated here to make sure that the alias name does // not contain any illegal characters. + dependencies := map[string]*Dependency{} for _, dependency := range md.Dependencies { if err := dependency.Validate(); err != nil { return err } + key := dependency.Name + if dependency.Alias != "" { + key = dependency.Alias + } + if dependencies[key] != nil { + return ValidationErrorf("more than one dependency with name or alias %q", key) + } + dependencies[key] = dependency } return nil } diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go b/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go index 6cf23a122..f0272fd6a 100644 --- a/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go @@ -129,7 +129,7 @@ func coalesceDeps(printf printFn, chrt *chart.Chart, dest map[string]interface{} // coalesceGlobals copies the globals out of src and merges them into dest. // // For convenience, returns dest. 
-func coalesceGlobals(printf printFn, dest, src map[string]interface{}, prefix string, merge bool) { +func coalesceGlobals(printf printFn, dest, src map[string]interface{}, prefix string, _ bool) { var dg, sg map[string]interface{} if destglob, ok := dest[GlobalKey]; !ok { diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/create.go b/vendor/helm.sh/helm/v3/pkg/chartutil/create.go index 30d0556e5..0bb5a83cd 100644 --- a/vendor/helm.sh/helm/v3/pkg/chartutil/create.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/create.go @@ -175,6 +175,15 @@ resources: {} # cpu: 100m # memory: 128Mi +livenessProbe: + httpGet: + path: / + port: http +readinessProbe: + httpGet: + path: / + port: http + autoscaling: enabled: false minReplicas: 1 @@ -333,13 +342,9 @@ spec: containerPort: {{ .Values.service.port }} protocol: TCP livenessProbe: - httpGet: - path: / - port: http + {{- toYaml .Values.livenessProbe | nindent 12 }} readinessProbe: - httpGet: - path: / - port: http + {{- toYaml .Values.readinessProbe | nindent 12 }} resources: {{- toYaml .Values.resources | nindent 12 }} {{- with .Values.volumeMounts }} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go b/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go index 8dbb5ef11..205d99e09 100644 --- a/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go @@ -59,9 +59,8 @@ func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath s if bv, ok := vv.(bool); ok { r.Enabled = bv break - } else { - log.Printf("Warning: Condition path '%s' for chart %s returned non-bool value", c, r.Name) } + log.Printf("Warning: Condition path '%s' for chart %s returned non-bool value", c, r.Name) } else if _, ok := err.(ErrNoValue); !ok { // this is a real error log.Printf("Warning: PathValue returned error %v", err) diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go b/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go index fcdcc27ea..0a4046d2e 100644 --- a/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go @@ -33,3 +33,11 @@ type ErrNoValue struct { } func (e ErrNoValue) Error() string { return fmt.Sprintf("%q is not a value", e.Key) } + +type ErrInvalidChartName struct { + Name string +} + +func (e ErrInvalidChartName) Error() string { + return fmt.Sprintf("%q is not a valid chart name", e.Name) +} diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/save.go b/vendor/helm.sh/helm/v3/pkg/chartutil/save.go index 2ce4eddaf..4ee90709c 100644 --- a/vendor/helm.sh/helm/v3/pkg/chartutil/save.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/save.go @@ -39,6 +39,10 @@ var headerBytes = []byte("+aHR0cHM6Ly95b3V0dS5iZS96OVV6MWljandyTQo=") // directory, writing the chart's contents to that subdirectory. 
func SaveDir(c *chart.Chart, dest string) error { // Create the chart directory + err := validateName(c.Name()) + if err != nil { + return err + } outdir := filepath.Join(dest, c.Name()) if fi, err := os.Stat(outdir); err == nil && !fi.IsDir() { return errors.Errorf("file %s already exists and is not a directory", outdir) @@ -149,6 +153,10 @@ func Save(c *chart.Chart, outDir string) (string, error) { } func writeTarContents(out *tar.Writer, c *chart.Chart, prefix string) error { + err := validateName(c.Name()) + if err != nil { + return err + } base := filepath.Join(prefix, c.Name()) // Pull out the dependencies of a v1 Chart, since there's no way @@ -242,3 +250,15 @@ func writeToTar(out *tar.Writer, name string, body []byte) error { _, err := out.Write(body) return err } + +// If the name has directory name has characters which would change the location +// they need to be removed. +func validateName(name string) error { + nname := filepath.Base(name) + + if nname != name { + return ErrInvalidChartName{name} + } + + return nil +} diff --git a/vendor/helm.sh/helm/v3/pkg/cli/environment.go b/vendor/helm.sh/helm/v3/pkg/cli/environment.go index dac2a4bc1..4f74f2642 100644 --- a/vendor/helm.sh/helm/v3/pkg/cli/environment.go +++ b/vendor/helm.sh/helm/v3/pkg/cli/environment.go @@ -44,6 +44,9 @@ const defaultMaxHistory = 10 // defaultBurstLimit sets the default client-side throttling limit const defaultBurstLimit = 100 +// defaultQPS sets the default QPS value to 0 to to use library defaults unless specified +const defaultQPS = float32(0) + // EnvSettings describes all of the environment settings. type EnvSettings struct { namespace string @@ -83,6 +86,8 @@ type EnvSettings struct { MaxHistory int // BurstLimit is the default client-side throttling limit. BurstLimit int + // QPS is queries per second which may be used to avoid throttling. 
+ QPS float32 } func New() *EnvSettings { @@ -102,6 +107,7 @@ func New() *EnvSettings { RepositoryConfig: envOr("HELM_REPOSITORY_CONFIG", helmpath.ConfigPath("repositories.yaml")), RepositoryCache: envOr("HELM_REPOSITORY_CACHE", helmpath.CachePath("repository")), BurstLimit: envIntOr("HELM_BURST_LIMIT", defaultBurstLimit), + QPS: envFloat32Or("HELM_QPS", defaultQPS), } env.Debug, _ = strconv.ParseBool(os.Getenv("HELM_DEBUG")) @@ -119,6 +125,7 @@ func New() *EnvSettings { ImpersonateGroup: &env.KubeAsGroups, WrapConfigFn: func(config *rest.Config) *rest.Config { config.Burst = env.BurstLimit + config.QPS = env.QPS config.Wrap(func(rt http.RoundTripper) http.RoundTripper { return &retryingRoundTripper{wrapped: rt} }) @@ -146,6 +153,7 @@ func (s *EnvSettings) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.RepositoryConfig, "repository-config", s.RepositoryConfig, "path to the file containing repository names and URLs") fs.StringVar(&s.RepositoryCache, "repository-cache", s.RepositoryCache, "path to the file containing cached repository indexes") fs.IntVar(&s.BurstLimit, "burst-limit", s.BurstLimit, "client-side default throttling limit") + fs.Float32Var(&s.QPS, "qps", s.QPS, "queries per second used when communicating with the Kubernetes API, not including bursting") } func envOr(name, def string) string { @@ -179,6 +187,18 @@ func envIntOr(name string, def int) int { return ret } +func envFloat32Or(name string, def float32) float32 { + if name == "" { + return def + } + envVal := envOr(name, strconv.FormatFloat(float64(def), 'f', 2, 32)) + ret, err := strconv.ParseFloat(envVal, 32) + if err != nil { + return def + } + return float32(ret) +} + func envCSV(name string) (ls []string) { trimmed := strings.Trim(os.Getenv(name), ", ") if trimmed != "" { @@ -201,6 +221,7 @@ func (s *EnvSettings) EnvVars() map[string]string { "HELM_NAMESPACE": s.Namespace(), "HELM_MAX_HISTORY": strconv.Itoa(s.MaxHistory), "HELM_BURST_LIMIT": strconv.Itoa(s.BurstLimit), + "HELM_QPS": strconv.FormatFloat(float64(s.QPS), 'f', 2, 32), // broken, these are populated from helm flags and not kubeconfig. 
"HELM_KUBECONTEXT": s.KubeContext, diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/manager.go b/vendor/helm.sh/helm/v3/pkg/downloader/manager.go index a5b0af080..68c9c6e00 100644 --- a/vendor/helm.sh/helm/v3/pkg/downloader/manager.go +++ b/vendor/helm.sh/helm/v3/pkg/downloader/manager.go @@ -714,15 +714,21 @@ func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]* var entry repo.ChartVersions entry, err = findEntryByName(name, cr) if err != nil { + // TODO: Where linting is skipped in this function we should + // refactor to remove naked returns while ensuring the same + // behavior + //nolint:nakedret return } var ve *repo.ChartVersion ve, err = findVersionedEntry(version, entry) if err != nil { + //nolint:nakedret return } url, err = normalizeURL(repoURL, ve.URLs[0]) if err != nil { + //nolint:nakedret return } username = cr.Config.Username @@ -732,6 +738,7 @@ func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]* caFile = cr.Config.CAFile certFile = cr.Config.CertFile keyFile = cr.Config.KeyFile + //nolint:nakedret return } } diff --git a/vendor/helm.sh/helm/v3/pkg/engine/engine.go b/vendor/helm.sh/helm/v3/pkg/engine/engine.go index 150be16b7..61c0782fc 100644 --- a/vendor/helm.sh/helm/v3/pkg/engine/engine.go +++ b/vendor/helm.sh/helm/v3/pkg/engine/engine.go @@ -40,16 +40,17 @@ type Engine struct { Strict bool // In LintMode, some 'required' template values may be missing, so don't fail LintMode bool - // the rest config to connect to the kubernetes api - config *rest.Config + // optional provider of clients to talk to the Kubernetes API + clientProvider *ClientProvider // EnableDNS tells the engine to allow DNS lookups when rendering templates EnableDNS bool } // New creates a new instance of Engine using the passed in rest config. func New(config *rest.Config) Engine { + var clientProvider ClientProvider = clientProviderFromConfig{config} return Engine{ - config: config, + clientProvider: &clientProvider, } } @@ -85,10 +86,21 @@ func Render(chrt *chart.Chart, values chartutil.Values) (map[string]string, erro // RenderWithClient takes a chart, optional values, and value overrides, and attempts to // render the Go templates using the default options. This engine is client aware and so can have template -// functions that interact with the client +// functions that interact with the client. func RenderWithClient(chrt *chart.Chart, values chartutil.Values, config *rest.Config) (map[string]string, error) { + var clientProvider ClientProvider = clientProviderFromConfig{config} return Engine{ - config: config, + clientProvider: &clientProvider, + }.Render(chrt, values) +} + +// RenderWithClientProvider takes a chart, optional values, and value overrides, and attempts to +// render the Go templates using the default options. This engine is client aware and so can have template +// functions that interact with the client. +// This function differs from RenderWithClient in that it lets you customize the way a dynamic client is constructed. +func RenderWithClientProvider(chrt *chart.Chart, values chartutil.Values, clientProvider ClientProvider) (map[string]string, error) { + return Engine{ + clientProvider: &clientProvider, }.Render(chrt, values) } @@ -112,13 +124,10 @@ func warnWrap(warn string) string { return warnStartDelim + warn + warnEndDelim } -// initFunMap creates the Engine's FuncMap and adds context-specific functions. 
-func (e Engine) initFunMap(t *template.Template, referenceTpls map[string]renderable) { - funcMap := funcMap() - includedNames := make(map[string]int) - - // Add the 'include' function here so we can close over t. - funcMap["include"] = func(name string, data interface{}) (string, error) { +// 'include' needs to be defined in the scope of a 'tpl' template as +// well as regular file-loaded templates. +func includeFun(t *template.Template, includedNames map[string]int) func(string, interface{}) (string, error) { + return func(name string, data interface{}) (string, error) { var buf strings.Builder if v, ok := includedNames[name]; ok { if v > recursionMaxNums { @@ -132,33 +141,62 @@ func (e Engine) initFunMap(t *template.Template, referenceTpls map[string]render includedNames[name]-- return buf.String(), err } +} - // Add the 'tpl' function here - funcMap["tpl"] = func(tpl string, vals chartutil.Values) (string, error) { - basePath, err := vals.PathValue("Template.BasePath") +// As does 'tpl', so that nested calls to 'tpl' see the templates +// defined by their enclosing contexts. +func tplFun(parent *template.Template, includedNames map[string]int, strict bool) func(string, interface{}) (string, error) { + return func(tpl string, vals interface{}) (string, error) { + t, err := parent.Clone() if err != nil { - return "", errors.Wrapf(err, "cannot retrieve Template.Basepath from values inside tpl function: %s", tpl) + return "", errors.Wrapf(err, "cannot clone template") } - templateName, err := vals.PathValue("Template.Name") - if err != nil { - return "", errors.Wrapf(err, "cannot retrieve Template.Name from values inside tpl function: %s", tpl) + // Re-inject the missingkey option, see text/template issue https://github.com/golang/go/issues/43022 + // We have to go by strict from our engine configuration, as the option fields are private in Template. + // TODO: Remove workaround (and the strict parameter) once we build only with golang versions with a fix. + if strict { + t.Option("missingkey=error") + } else { + t.Option("missingkey=zero") } - templates := map[string]renderable{ - templateName.(string): { - tpl: tpl, - vals: vals, - basePath: basePath.(string), - }, + // Re-inject 'include' so that it can close over our clone of t; + // this lets any 'define's inside tpl be 'include'd. + t.Funcs(template.FuncMap{ + "include": includeFun(t, includedNames), + "tpl": tplFun(t, includedNames, strict), + }) + + // We need a .New template, as template text which is just blanks + // or comments after parsing out defines just addes new named + // template definitions without changing the main template. + // https://pkg.go.dev/text/template#Template.Parse + // Use the parent's name for lack of a better way to identify the tpl + // text string. (Maybe we could use a hash appended to the name?) + t, err = t.New(parent.Name()).Parse(tpl) + if err != nil { + return "", errors.Wrapf(err, "cannot parse template %q", tpl) } - result, err := e.renderWithReferences(templates, referenceTpls) - if err != nil { + var buf strings.Builder + if err := t.Execute(&buf, vals); err != nil { return "", errors.Wrapf(err, "error during tpl function execution for %q", tpl) } - return result[templateName.(string)], nil + + // See comment in renderWithReferences explaining the hack. + return strings.ReplaceAll(buf.String(), "", ""), nil } +} + +// initFunMap creates the Engine's FuncMap and adds context-specific functions. 
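Reviewer note on the includeFun/tplFun refactor above: each tpl call now clones the enclosing template and re-injects include and tpl against the clone, so a define written inside a tpl string can be include'd from the same string. A hedged end-to-end sketch under that reading; the inline chart is illustrative, and the doubled escaping is the fiddly part:

package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/chart"
	"helm.sh/helm/v3/pkg/chartutil"
	"helm.sh/helm/v3/pkg/engine"
)

func main() {
	// The body passed to `tpl` defines a named template and then `include`s it;
	// `include` closes over the same clone that parsed the `define`, so this renders.
	manifest := `{{ tpl "{{ define \"inner\" }}ok{{ end }}{{ include \"inner\" . }}" . }}`

	c := &chart.Chart{
		Metadata:  &chart.Metadata{APIVersion: "v2", Name: "demo", Version: "0.1.0"},
		Templates: []*chart.File{{Name: "templates/out.txt", Data: []byte(manifest)}},
	}
	vals, err := chartutil.ToRenderValues(c, map[string]interface{}{},
		chartutil.ReleaseOptions{Name: "r", Namespace: "default"}, chartutil.DefaultCapabilities)
	if err != nil {
		panic(err)
	}
	out, err := engine.Render(c, vals)
	if err != nil {
		panic(err)
	}
	fmt.Println(out["demo/templates/out.txt"]) // ok
}
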
+func (e Engine) initFunMap(t *template.Template) { + funcMap := funcMap() + includedNames := make(map[string]int) + + // Add the template-rendering functions here so we can close over t. + funcMap["include"] = includeFun(t, includedNames) + funcMap["tpl"] = tplFun(t, includedNames, e.Strict) // Add the `required` function here so we can use lintMode funcMap["required"] = func(warn string, val interface{}) (interface{}, error) { @@ -194,8 +232,8 @@ func (e Engine) initFunMap(t *template.Template, referenceTpls map[string]render // If we are not linting and have a cluster connection, provide a Kubernetes-backed // implementation. - if !e.LintMode && e.config != nil { - funcMap["lookup"] = NewLookupFunction(e.config) + if !e.LintMode && e.clientProvider != nil { + funcMap["lookup"] = newLookupFunction(*e.clientProvider) } // When DNS lookups are not enabled override the sprig function and return @@ -210,13 +248,7 @@ func (e Engine) initFunMap(t *template.Template, referenceTpls map[string]render } // render takes a map of templates/values and renders them. -func (e Engine) render(tpls map[string]renderable) (map[string]string, error) { - return e.renderWithReferences(tpls, tpls) -} - -// renderWithReferences takes a map of templates/values to render, and a map of -// templates which can be referenced within them. -func (e Engine) renderWithReferences(tpls, referenceTpls map[string]renderable) (rendered map[string]string, err error) { +func (e Engine) render(tpls map[string]renderable) (rendered map[string]string, err error) { // Basically, what we do here is start with an empty parent template and then // build up a list of templates -- one for each file. Once all of the templates // have been parsed, we loop through again and execute every template. @@ -238,12 +270,11 @@ func (e Engine) renderWithReferences(tpls, referenceTpls map[string]renderable) t.Option("missingkey=zero") } - e.initFunMap(t, referenceTpls) + e.initFunMap(t) // We want to parse the templates in a predictable order. The order favors // higher-level (in file system) templates over deeply nested templates. keys := sortTemplates(tpls) - referenceKeys := sortTemplates(referenceTpls) for _, filename := range keys { r := tpls[filename] @@ -252,17 +283,6 @@ func (e Engine) renderWithReferences(tpls, referenceTpls map[string]renderable) } } - // Adding the reference templates to the template context - // so they can be referenced in the tpl function - for _, filename := range referenceKeys { - if t.Lookup(filename) == nil { - r := referenceTpls[filename] - if _, err := t.New(filename).Parse(r.tpl); err != nil { - return map[string]string{}, cleanupParseError(filename, err) - } - } - } - rendered = make(map[string]string, len(keys)) for _, filename := range keys { // Don't render partials. We don't care about the direct output of partials. diff --git a/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go b/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go index b378ca9d6..86a7d698c 100644 --- a/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go +++ b/vendor/helm.sh/helm/v3/pkg/engine/lookup_func.go @@ -39,9 +39,28 @@ type lookupFunc = func(apiversion string, resource string, namespace string, nam // This function is considered deprecated, and will be renamed in Helm 4. It will no // longer be a public function.
func NewLookupFunction(config *rest.Config) lookupFunc { - return func(apiversion string, resource string, namespace string, name string) (map[string]interface{}, error) { + return newLookupFunction(clientProviderFromConfig{config: config}) +} + +type ClientProvider interface { + // GetClientFor returns a dynamic.NamespaceableResourceInterface suitable for interacting with resources + // corresponding to the provided apiVersion and kind, as well as a boolean indicating whether the resources + // are namespaced. + GetClientFor(apiVersion, kind string) (dynamic.NamespaceableResourceInterface, bool, error) +} + +type clientProviderFromConfig struct { + config *rest.Config +} + +func (c clientProviderFromConfig) GetClientFor(apiVersion, kind string) (dynamic.NamespaceableResourceInterface, bool, error) { + return getDynamicClientOnKind(apiVersion, kind, c.config) +} + +func newLookupFunction(clientProvider ClientProvider) lookupFunc { + return func(apiversion string, kind string, namespace string, name string) (map[string]interface{}, error) { var client dynamic.ResourceInterface - c, namespaced, err := getDynamicClientOnKind(apiversion, resource, config) + c, namespaced, err := clientProvider.GetClientFor(apiversion, kind) if err != nil { return map[string]interface{}{}, err } diff --git a/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go b/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go index 0d13ade57..a371b52eb 100644 --- a/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go +++ b/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go @@ -17,6 +17,7 @@ package getter import ( "bytes" + "fmt" "os" "os/exec" "path/filepath" @@ -62,6 +63,13 @@ type pluginGetter struct { opts options } +func (p *pluginGetter) setupOptionsEnv(env []string) []string { + env = append(env, fmt.Sprintf("HELM_PLUGIN_USERNAME=%s", p.opts.username)) + env = append(env, fmt.Sprintf("HELM_PLUGIN_PASSWORD=%s", p.opts.password)) + env = append(env, fmt.Sprintf("HELM_PLUGIN_PASS_CREDENTIALS_ALL=%t", p.opts.passCredentialsAll)) + return env +} + // Get runs downloader plugin command func (p *pluginGetter) Get(href string, options ...Option) (*bytes.Buffer, error) { for _, opt := range options { @@ -71,7 +79,7 @@ func (p *pluginGetter) Get(href string, options ...Option) (*bytes.Buffer, error argv := append(commands[1:], p.opts.certFile, p.opts.keyFile, p.opts.caFile, href) prog := exec.Command(filepath.Join(p.base, commands[0]), argv...) plugin.SetupPluginEnv(p.settings, p.name, p.base) - prog.Env = os.Environ() + prog.Env = p.setupOptionsEnv(os.Environ()) buf := bytes.NewBuffer(nil) prog.Stdout = buf prog.Stderr = os.Stderr diff --git a/vendor/helm.sh/helm/v3/internal/ignore/doc.go b/vendor/helm.sh/helm/v3/pkg/ignore/doc.go similarity index 97% rename from vendor/helm.sh/helm/v3/internal/ignore/doc.go rename to vendor/helm.sh/helm/v3/pkg/ignore/doc.go index a1f0fcfc8..5245d410e 100644 --- a/vendor/helm.sh/helm/v3/internal/ignore/doc.go +++ b/vendor/helm.sh/helm/v3/pkg/ignore/doc.go @@ -65,4 +65,4 @@ Notable differences from .gitignore: - The evaluation of escape sequences has not been tested for compatibility - There is no support for '\!' as a special leading sequence. 
*/ -package ignore // import "helm.sh/helm/v3/internal/ignore" +package ignore // import "helm.sh/helm/v3/pkg/ignore" diff --git a/vendor/helm.sh/helm/v3/internal/ignore/rules.go b/vendor/helm.sh/helm/v3/pkg/ignore/rules.go similarity index 100% rename from vendor/helm.sh/helm/v3/internal/ignore/rules.go rename to vendor/helm.sh/helm/v3/pkg/ignore/rules.go diff --git a/vendor/helm.sh/helm/v3/pkg/kube/client.go b/vendor/helm.sh/helm/v3/pkg/kube/client.go index 0772678d1..9df833a43 100644 --- a/vendor/helm.sh/helm/v3/pkg/kube/client.go +++ b/vendor/helm.sh/helm/v3/pkg/kube/client.go @@ -467,7 +467,7 @@ func (c *Client) Update(original, target ResourceList, force bool) (*Result, err // if one or more fail and collect any errors. All successfully deleted items // will be returned in the `Deleted` ResourceList that is part of the result. func (c *Client) Delete(resources ResourceList) (*Result, []error) { - return delete(c, resources, metav1.DeletePropagationBackground) + return rdelete(c, resources, metav1.DeletePropagationBackground) } // Delete deletes Kubernetes resources specified in the resources list with @@ -475,10 +475,10 @@ func (c *Client) Delete(resources ResourceList) (*Result, []error) { // if one or more fail and collect any errors. All successfully deleted items // will be returned in the `Deleted` ResourceList that is part of the result. func (c *Client) DeleteWithPropagationPolicy(resources ResourceList, policy metav1.DeletionPropagation) (*Result, []error) { - return delete(c, resources, policy) + return rdelete(c, resources, policy) } -func delete(c *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) { +func rdelete(c *Client, resources ResourceList, propagation metav1.DeletionPropagation) (*Result, []error) { var errs []error res := &Result{} mtx := sync.Mutex{} @@ -772,7 +772,7 @@ func (c *Client) waitForJob(obj runtime.Object, name string) (bool, error) { if c.Type == batch.JobComplete && c.Status == "True" { return true, nil } else if c.Type == batch.JobFailed && c.Status == "True" { - return true, errors.Errorf("job failed: %s", c.Reason) + return true, errors.Errorf("job %s failed: %s", name, c.Reason) } } diff --git a/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go b/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go index e6c4b6207..cc2c84b40 100644 --- a/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go +++ b/vendor/helm.sh/helm/v3/pkg/kube/fake/printer.go @@ -49,7 +49,7 @@ func (p *PrintingKubeClient) Create(resources kube.ResourceList) (*kube.Result, return &kube.Result{Created: resources}, nil } -func (p *PrintingKubeClient) Get(resources kube.ResourceList, related bool) (map[string][]runtime.Object, error) { +func (p *PrintingKubeClient) Get(resources kube.ResourceList, _ bool) (map[string][]runtime.Object, error) { _, err := io.Copy(p.Out, bufferize(resources)) if err != nil { return nil, err @@ -119,7 +119,7 @@ func (p *PrintingKubeClient) WaitAndGetCompletedPodPhase(_ string, _ time.Durati // DeleteWithPropagationPolicy implements KubeClient delete. // // It only prints out the content to be deleted. 
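
Returning to the plugin getter change above: downloader plugins now receive the username/password options through HELM_PLUGIN_USERNAME, HELM_PLUGIN_PASSWORD and HELM_PLUGIN_PASS_CREDENTIALS_ALL. A hypothetical plugin executable could consume them as below; the argv layout (certFile, keyFile, caFile, then the URL) mirrors the Get call above, and the rest is illustrative:

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Helm invokes downloader plugins as: <cmd> certFile keyFile caFile href
	href := os.Args[len(os.Args)-1]

	req, err := http.NewRequest(http.MethodGet, href, nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Credentials now arrive via the environment set up by setupOptionsEnv.
	if user := os.Getenv("HELM_PLUGIN_USERNAME"); user != "" {
		req.SetBasicAuth(user, os.Getenv("HELM_PLUGIN_PASSWORD"))
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	// Helm reads the fetched chart bytes from the plugin's stdout.
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
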
-func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, policy metav1.DeletionPropagation) (*kube.Result, []error) { +func (p *PrintingKubeClient) DeleteWithPropagationPolicy(resources kube.ResourceList, _ metav1.DeletionPropagation) (*kube.Result, []error) { _, err := io.Copy(p.Out, bufferize(resources)) if err != nil { return nil, []error{err} diff --git a/vendor/helm.sh/helm/v3/pkg/kube/ready.go b/vendor/helm.sh/helm/v3/pkg/kube/ready.go index 7172a42bc..b2d26ba76 100644 --- a/vendor/helm.sh/helm/v3/pkg/kube/ready.go +++ b/vendor/helm.sh/helm/v3/pkg/kube/ready.go @@ -90,13 +90,6 @@ type ReadyChecker struct { // IsReady will fetch the latest state of the object from the server prior to // performing readiness checks, and it will return any error encountered. func (c *ReadyChecker) IsReady(ctx context.Context, v *resource.Info) (bool, error) { - var ( - // This defaults to true, otherwise we get to a point where - // things will always return false unless one of the objects - // that manages pods has been hit - ok = true - err error - ) switch value := AsVersioned(v).(type) { case *corev1.Pod: pod, err := c.client.CoreV1().Pods(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) @@ -183,11 +176,30 @@ func (c *ReadyChecker) IsReady(ctx context.Context, v *resource.Info) (bool, err if !c.statefulSetReady(sts) { return false, nil } - case *corev1.ReplicationController, *extensionsv1beta1.ReplicaSet, *appsv1beta2.ReplicaSet, *appsv1.ReplicaSet: - ok, err = c.podsReadyForObject(ctx, v.Namespace, value) - } - if !ok || err != nil { - return false, err + case *corev1.ReplicationController: + rc, err := c.client.CoreV1().ReplicationControllers(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.replicationControllerReady(rc) { + return false, nil + } + ready, err := c.podsReadyForObject(ctx, v.Namespace, value) + if !ready || err != nil { + return false, err + } + case *extensionsv1beta1.ReplicaSet, *appsv1beta2.ReplicaSet, *appsv1.ReplicaSet: + rs, err := c.client.AppsV1().ReplicaSets(v.Namespace).Get(ctx, v.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if !c.replicaSetReady(rs) { + return false, nil + } + ready, err := c.podsReadyForObject(ctx, v.Namespace, value) + if !ready || err != nil { + return false, err + } } return true, nil } @@ -276,6 +288,16 @@ func (c *ReadyChecker) volumeReady(v *corev1.PersistentVolumeClaim) bool { } func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deployment) bool { + // Verify the replicaset readiness + if !c.replicaSetReady(rs) { + return false + } + // Verify the generation observed by the deployment controller matches the spec generation + if dep.Status.ObservedGeneration != dep.ObjectMeta.Generation { + c.log("Deployment is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", dep.Namespace, dep.Name, dep.Status.ObservedGeneration, dep.ObjectMeta.Generation) + return false + } + expectedReady := *dep.Spec.Replicas - deploymentutil.MaxUnavailable(*dep) if !(rs.Status.ReadyReplicas >= expectedReady) { c.log("Deployment is not ready: %s/%s. 
%d out of %d expected pods are ready", dep.Namespace, dep.Name, rs.Status.ReadyReplicas, expectedReady) @@ -285,6 +307,12 @@ func (c *ReadyChecker) deploymentReady(rs *appsv1.ReplicaSet, dep *appsv1.Deploy } func (c *ReadyChecker) daemonSetReady(ds *appsv1.DaemonSet) bool { + // Verify the generation observed by the daemonSet controller matches the spec generation + if ds.Status.ObservedGeneration != ds.ObjectMeta.Generation { + c.log("DaemonSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", ds.Namespace, ds.Name, ds.Status.ObservedGeneration, ds.ObjectMeta.Generation) + return false + } + // If the update strategy is not a rolling update, there will be nothing to wait for if ds.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType { return true @@ -355,22 +383,22 @@ func (c *ReadyChecker) crdReady(crd apiextv1.CustomResourceDefinition) bool { } func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { + // Verify the generation observed by the statefulSet controller matches the spec generation + if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation { + c.log("StatefulSet is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", sts.Namespace, sts.Name, sts.Status.ObservedGeneration, sts.ObjectMeta.Generation) + return false + } + // If the update strategy is not a rolling update, there will be nothing to wait for if sts.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType { c.log("StatefulSet skipped ready check: %s/%s. updateStrategy is %v", sts.Namespace, sts.Name, sts.Spec.UpdateStrategy.Type) return true } - // Make sure the status is up-to-date with the StatefulSet changes - if sts.Status.ObservedGeneration < sts.Generation { - c.log("StatefulSet is not ready: %s/%s. update has not yet been observed", sts.Namespace, sts.Name) - return false - } - // Dereference all the pointers because StatefulSets like them var partition int // 1 is the default for replicas if not set - var replicas = 1 + replicas := 1 // For some reason, even if the update strategy is a rolling update, the // actual rollingUpdate field can be nil. If it is, we can safely assume // there is no partition value @@ -409,6 +437,24 @@ func (c *ReadyChecker) statefulSetReady(sts *appsv1.StatefulSet) bool { return true } +func (c *ReadyChecker) replicationControllerReady(rc *corev1.ReplicationController) bool { + // Verify the generation observed by the replicationController controller matches the spec generation + if rc.Status.ObservedGeneration != rc.ObjectMeta.Generation { + c.log("ReplicationController is not ready: %s/%s. observedGeneration (%d) does not match spec generation (%d).", rc.Namespace, rc.Name, rc.Status.ObservedGeneration, rc.ObjectMeta.Generation) + return false + } + return true +} + +func (c *ReadyChecker) replicaSetReady(rs *appsv1.ReplicaSet) bool { + // Verify the generation observed by the replicaSet controller matches the spec generation + if rs.Status.ObservedGeneration != rs.ObjectMeta.Generation { + c.log("ReplicaSet is not ready: %s/%s. 
observedGeneration (%d) does not match spec generation (%d).", rs.Namespace, rs.Name, rs.Status.ObservedGeneration, rs.ObjectMeta.Generation) + return false + } + return true +} + func getPods(ctx context.Context, client kubernetes.Interface, namespace, selector string) ([]corev1.Pod, error) { list, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ LabelSelector: selector, diff --git a/vendor/helm.sh/helm/v3/pkg/lint/lint.go b/vendor/helm.sh/helm/v3/pkg/lint/lint.go index 67e76bd3d..c0e79f55b 100644 --- a/vendor/helm.sh/helm/v3/pkg/lint/lint.go +++ b/vendor/helm.sh/helm/v3/pkg/lint/lint.go @@ -19,19 +19,25 @@ package lint // import "helm.sh/helm/v3/pkg/lint" import ( "path/filepath" + "helm.sh/helm/v3/pkg/chartutil" "helm.sh/helm/v3/pkg/lint/rules" "helm.sh/helm/v3/pkg/lint/support" ) // All runs all of the available linters on the given base directory. -func All(basedir string, values map[string]interface{}, namespace string, strict bool) support.Linter { +func All(basedir string, values map[string]interface{}, namespace string, _ bool) support.Linter { + return AllWithKubeVersion(basedir, values, namespace, nil) +} + +// AllWithKubeVersion runs all the available linters on the given base directory, allowing to specify the kubernetes version. +func AllWithKubeVersion(basedir string, values map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion) support.Linter { // Using abs path to get directory context chartDir, _ := filepath.Abs(basedir) linter := support.Linter{ChartDir: chartDir} rules.Chartfile(&linter) rules.ValuesWithOverrides(&linter, values) - rules.Templates(&linter, values, namespace, strict) + rules.TemplatesWithKubeVersion(&linter, values, namespace, kubeVersion) rules.Dependencies(&linter) return linter } diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go index 70532ad4f..910602b7d 100644 --- a/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go +++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/chartfile.go @@ -106,6 +106,10 @@ func validateChartName(cf *chart.Metadata) error { if cf.Name == "" { return errors.New("name is required") } + name := filepath.Base(cf.Name) + if name != cf.Name { + return fmt.Errorf("chart name %q is invalid", cf.Name) + } return nil } diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go index abecd1feb..f1ab1dcad 100644 --- a/vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go +++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/dependencies.go @@ -37,6 +37,7 @@ func Dependencies(linter *support.Linter) { } linter.RunLinterRule(support.ErrorSev, linter.ChartDir, validateDependencyInMetadata(c)) + linter.RunLinterRule(support.ErrorSev, linter.ChartDir, validateDependenciesUnique(c)) linter.RunLinterRule(support.WarningSev, linter.ChartDir, validateDependencyInChartsDir(c)) } @@ -80,3 +81,23 @@ func validateDependencyInMetadata(c *chart.Chart) (err error) { } return err } + +func validateDependenciesUnique(c *chart.Chart) (err error) { + dependencies := map[string]*chart.Dependency{} + shadowing := []string{} + + for _, dep := range c.Metadata.Dependencies { + key := dep.Name + if dep.Alias != "" { + key = dep.Alias + } + if dependencies[key] != nil { + shadowing = append(shadowing, key) + } + dependencies[key] = dep + } + if len(shadowing) > 0 { + err = fmt.Errorf("multiple dependencies with name or alias: %s", strings.Join(shadowing, ",")) + } + return err +} diff --git 
a/vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go index ce19b91d5..90e7748a4 100644 --- a/vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go +++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/deprecations.go @@ -24,6 +24,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/endpoints/deprecation" kscheme "k8s.io/client-go/kubernetes/scheme" + + "helm.sh/helm/v3/pkg/chartutil" ) var ( @@ -45,7 +47,7 @@ func (e deprecatedAPIError) Error() string { return msg } -func validateNoDeprecations(resource *K8sYamlStruct) error { +func validateNoDeprecations(resource *K8sYamlStruct, kubeVersion *chartutil.KubeVersion) error { // if `resource` does not have an APIVersion or Kind, we cannot test it for deprecation if resource.APIVersion == "" { return nil @@ -54,6 +56,14 @@ func validateNoDeprecations(resource *K8sYamlStruct) error { return nil } + majorVersion := k8sVersionMajor + minorVersion := k8sVersionMinor + + if kubeVersion != nil { + majorVersion = kubeVersion.Major + minorVersion = kubeVersion.Minor + } + runtimeObject, err := resourceToRuntimeObject(resource) if err != nil { // do not error for non-kubernetes resources @@ -62,11 +72,12 @@ func validateNoDeprecations(resource *K8sYamlStruct) error { } return err } - maj, err := strconv.Atoi(k8sVersionMajor) + + maj, err := strconv.Atoi(majorVersion) if err != nil { return err } - min, err := strconv.Atoi(k8sVersionMinor) + min, err := strconv.Atoi(minorVersion) if err != nil { return err } diff --git a/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go b/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go index 000f7ebcf..aa1dbb701 100644 --- a/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go +++ b/vendor/helm.sh/helm/v3/pkg/lint/rules/template.go @@ -45,7 +45,12 @@ var ( ) // Templates lints the templates in the Linter. -func Templates(linter *support.Linter, values map[string]interface{}, namespace string, strict bool) { +func Templates(linter *support.Linter, values map[string]interface{}, namespace string, _ bool) { + TemplatesWithKubeVersion(linter, values, namespace, nil) +} + +// TemplatesWithKubeVersion lints the templates in the Linter, allowing to specify the kubernetes version. 
+func TemplatesWithKubeVersion(linter *support.Linter, values map[string]interface{}, namespace string, kubeVersion *chartutil.KubeVersion) { fpath := "templates/" templatesPath := filepath.Join(linter.ChartDir, fpath) @@ -70,6 +75,11 @@ func Templates(linter *support.Linter, values map[string]interface{}, namespace Namespace: namespace, } + caps := chartutil.DefaultCapabilities.Copy() + if kubeVersion != nil { + caps.KubeVersion = *kubeVersion + } + // lint ignores import-values // See https://github.com/helm/helm/issues/9658 if err := chartutil.ProcessDependenciesWithMerge(chart, values); err != nil { @@ -80,7 +90,8 @@ func Templates(linter *support.Linter, values map[string]interface{}, namespace if err != nil { return } - valuesToRender, err := chartutil.ToRenderValues(chart, cvals, options, nil) + + valuesToRender, err := chartutil.ToRenderValues(chart, cvals, options, caps) if err != nil { linter.RunLinterRule(support.ErrorSev, fpath, err) return @@ -150,7 +161,7 @@ func Templates(linter *support.Linter, values map[string]interface{}, namespace // NOTE: set to warnings to allow users to support out-of-date kubernetes // Refs https://github.com/helm/helm/issues/8596 linter.RunLinterRule(support.WarningSev, fpath, validateMetadataName(yamlStruct)) - linter.RunLinterRule(support.WarningSev, fpath, validateNoDeprecations(yamlStruct)) + linter.RunLinterRule(support.WarningSev, fpath, validateNoDeprecations(yamlStruct, kubeVersion)) linter.RunLinterRule(support.ErrorSev, fpath, validateMatchSelector(yamlStruct, renderedContent)) linter.RunLinterRule(support.ErrorSev, fpath, validateListAnnotations(yamlStruct, renderedContent)) diff --git a/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go b/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go index fb3bc5215..5bb743481 100644 --- a/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go +++ b/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go @@ -175,6 +175,10 @@ var validPluginName = regexp.MustCompile("^[A-Za-z0-9_-]+$") // validatePluginData validates a plugin's YAML data. func validatePluginData(plug *Plugin, filepath string) error { + // When metadata section missing, initialize with no data + if plug.Metadata == nil { + plug.Metadata = &Metadata{} + } if !validPluginName.MatchString(plug.Metadata.Name) { return fmt.Errorf("invalid plugin name at %q", filepath) } diff --git a/vendor/helm.sh/helm/v3/pkg/pusher/pusher.go b/vendor/helm.sh/helm/v3/pkg/pusher/pusher.go index c99d97b35..5b8a9160f 100644 --- a/vendor/helm.sh/helm/v3/pkg/pusher/pusher.go +++ b/vendor/helm.sh/helm/v3/pkg/pusher/pusher.go @@ -116,7 +116,7 @@ var ociProvider = Provider{ // All finds all of the registered pushers as a list of Provider instances. // Currently, just the built-in pushers are collected. 
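
A short usage sketch for the new KubeVersion-aware lint entry points (chart path and version are illustrative): AllWithKubeVersion threads the version down to TemplatesWithKubeVersion, so deprecation checks run against the targeted cluster version rather than the compiled-in default.

package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/chartutil"
	"helm.sh/helm/v3/pkg/lint"
)

func main() {
	// Lint ./mychart as if it were going to be installed on Kubernetes 1.25.
	kv := &chartutil.KubeVersion{Version: "v1.25.0", Major: "1", Minor: "25"}
	linter := lint.AllWithKubeVersion("./mychart", map[string]interface{}{}, "default", kv)
	for _, msg := range linter.Messages {
		fmt.Println(msg)
	}
}
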
-func All(settings *cli.EnvSettings) Providers { +func All(_ *cli.EnvSettings) Providers { result := Providers{ociProvider} return result } diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go index b5d75b88b..bb8e84dda 100644 --- a/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go +++ b/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go @@ -132,7 +132,7 @@ func sortHooksByKind(hooks []*release.Hook, ordering KindSortOrder) []*release.H return h } -func lessByKind(a interface{}, b interface{}, kindA string, kindB string, o KindSortOrder) bool { +func lessByKind(_ interface{}, _ interface{}, kindA string, kindB string, o KindSortOrder) bool { ordering := make(map[string]int, len(o)) for v, k := range o { ordering[k] = v diff --git a/vendor/helm.sh/helm/v3/pkg/repo/index.go b/vendor/helm.sh/helm/v3/pkg/repo/index.go index 8a23ba060..8009c15c8 100644 --- a/vendor/helm.sh/helm/v3/pkg/repo/index.go +++ b/vendor/helm.sh/helm/v3/pkg/repo/index.go @@ -359,6 +359,10 @@ func loadIndex(data []byte, source string) (*IndexFile, error) { log.Printf("skipping loading invalid entry for chart %q from %s: empty entry", name, source) continue } + // When metadata section missing, initialize with no data + if cvs[idx].Metadata == nil { + cvs[idx].Metadata = &chart.Metadata{} + } if cvs[idx].APIVersion == "" { cvs[idx].APIVersion = chart.APIVersionV1 } diff --git a/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go b/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go index a1c2209a3..8f5714f15 100644 --- a/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go +++ b/vendor/helm.sh/helm/v3/pkg/storage/driver/sql.go @@ -662,7 +662,7 @@ func (s *SQL) Delete(key string) (*rspb.Release, error) { } // Get release custom labels from database -func (s *SQL) getReleaseCustomLabels(key string, namespace string) (map[string]string, error) { +func (s *SQL) getReleaseCustomLabels(key string, _ string) (map[string]string, error) { query, args, err := s.statementBuilder. Select(sqlCustomLabelsTableKeyColumn, sqlCustomLabelsTableValueColumn). From(sqlCustomLabelsTableName). diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index cf9b6e6eb..d099238cd 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -3286,7 +3286,7 @@ message PersistentVolumeStatus { // lastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. - // This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature. + // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). // +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4; diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 1aade3806..61ba21bca 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -423,7 +423,7 @@ type PersistentVolumeStatus struct { Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` // lastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. 
- // This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature. + // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). // +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"` diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 01152a096..fd6f7dc61 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -1478,7 +1478,7 @@ var map_PersistentVolumeStatus = map[string]string{ "phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", "message": "message is a human-readable message indicating details about why the volume is in this state.", "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", - "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature.", + "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).", } func (PersistentVolumeStatus) SwaggerDoc() map[string]string { diff --git a/vendor/modules.txt b/vendor/modules.txt index 0c2235207..616854d42 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -27,7 +27,7 @@ github.com/Masterminds/squirrel # github.com/Microsoft/hcsshim v0.11.4 ## explicit; go 1.18 github.com/Microsoft/hcsshim/osversion -# github.com/NVIDIA/go-nvlib v0.1.0 +# github.com/NVIDIA/go-nvlib v0.2.0 ## explicit; go 1.20 github.com/NVIDIA/go-nvlib/pkg/nvmdev github.com/NVIDIA/go-nvlib/pkg/nvpci @@ -233,8 +233,8 @@ github.com/gogo/protobuf/sortkeys # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru -# github.com/golang/protobuf v1.5.3 -## explicit; go 1.9 +# github.com/golang/protobuf v1.5.4 +## explicit; go 1.17 github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any @@ -346,9 +346,6 @@ github.com/mattn/go-isatty # github.com/mattn/go-runewidth v0.0.15 ## explicit; go 1.9 github.com/mattn/go-runewidth -# github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 -## explicit; go 1.19 -github.com/matttproud/golang_protobuf_extensions/v2/pbutil # github.com/mitchellh/copystructure v1.2.0 ## explicit; go 1.15 github.com/mitchellh/copystructure @@ -361,7 +358,7 @@ github.com/mitchellh/hashstructure # github.com/mitchellh/reflectwalk v1.0.2 ## explicit github.com/mitchellh/reflectwalk -# github.com/mittwald/go-helm-client v0.12.7 +# github.com/mittwald/go-helm-client v0.12.9 ## explicit; go 1.21 github.com/mittwald/go-helm-client github.com/mittwald/go-helm-client/values @@ -394,8 +391,8 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit 
github.com/mxk/go-flowrate/flowrate -# github.com/onsi/ginkgo/v2 v2.14.0 -## explicit; go 1.18 +# github.com/onsi/ginkgo/v2 v2.17.1 +## explicit; go 1.20 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config github.com/onsi/ginkgo/v2/formatter @@ -416,8 +413,8 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.30.0 -## explicit; go 1.18 +# github.com/onsi/gomega v1.32.0 +## explicit; go 1.20 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/internal @@ -472,8 +469,8 @@ github.com/pmezard/go-difflib/difflib ## explicit; go 1.17 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1 -# github.com/prometheus/client_golang v1.18.0 -## explicit; go 1.19 +# github.com/prometheus/client_golang v1.19.0 +## explicit; go 1.20 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal @@ -482,7 +479,7 @@ github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.5.0 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.45.0 +# github.com/prometheus/common v0.48.0 ## explicit; go 1.20 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg @@ -492,31 +489,39 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/regclient/regclient v0.4.8 +# github.com/regclient/regclient v0.6.0 ## explicit; go 1.20 github.com/regclient/regclient github.com/regclient/regclient/config github.com/regclient/regclient/internal/auth +github.com/regclient/regclient/internal/cache github.com/regclient/regclient/internal/conffile github.com/regclient/regclient/internal/httplink +github.com/regclient/regclient/internal/limitread +github.com/regclient/regclient/internal/muset github.com/regclient/regclient/internal/reghttp github.com/regclient/regclient/internal/rwfs +github.com/regclient/regclient/internal/strparse +github.com/regclient/regclient/internal/throttle github.com/regclient/regclient/internal/timejson github.com/regclient/regclient/internal/units github.com/regclient/regclient/internal/version -github.com/regclient/regclient/internal/wraperr github.com/regclient/regclient/pkg/archive github.com/regclient/regclient/scheme github.com/regclient/regclient/scheme/ocidir github.com/regclient/regclient/scheme/reg github.com/regclient/regclient/types github.com/regclient/regclient/types/blob +github.com/regclient/regclient/types/descriptor github.com/regclient/regclient/types/docker github.com/regclient/regclient/types/docker/schema1 github.com/regclient/regclient/types/docker/schema2 +github.com/regclient/regclient/types/errs github.com/regclient/regclient/types/manifest +github.com/regclient/regclient/types/mediatype github.com/regclient/regclient/types/oci github.com/regclient/regclient/types/oci/v1 +github.com/regclient/regclient/types/ping github.com/regclient/regclient/types/platform github.com/regclient/regclient/types/ref github.com/regclient/regclient/types/referrer @@ -552,6 +557,12 @@ github.com/spf13/pflag ## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/require +# 
github.com/ulikunitz/xz v0.5.11 +## explicit; go 1.12 +github.com/ulikunitz/xz +github.com/ulikunitz/xz/internal/hash +github.com/ulikunitz/xz/internal/xlog +github.com/ulikunitz/xz/lzma # github.com/urfave/cli/v2 v2.27.1 ## explicit; go 1.18 github.com/urfave/cli/v2 @@ -612,7 +623,7 @@ go.starlark.net/syntax # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr -# go.uber.org/zap v1.26.0 +# go.uber.org/zap v1.27.0 ## explicit; go 1.19 go.uber.org/zap go.uber.org/zap/buffer @@ -623,7 +634,7 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# golang.org/x/crypto v0.17.0 +# golang.org/x/crypto v0.18.0 ## explicit; go 1.18 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish @@ -642,10 +653,10 @@ golang.org/x/crypto/scrypt golang.org/x/exp/constraints golang.org/x/exp/maps golang.org/x/exp/slices -# golang.org/x/mod v0.15.0 +# golang.org/x/mod v0.17.0 ## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net v0.19.0 +# golang.org/x/net v0.20.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -657,21 +668,21 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/socks golang.org/x/net/proxy -# golang.org/x/oauth2 v0.15.0 +# golang.org/x/oauth2 v0.16.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.5.0 +# golang.org/x/sync v0.6.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.17.0 +# golang.org/x/sys v0.18.0 ## explicit; go 1.18 golang.org/x/sys/execabs golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.15.0 +# golang.org/x/term v0.18.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 @@ -699,8 +710,9 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.5.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.16.1 +# golang.org/x/tools v0.17.0 ## explicit; go 1.18 +golang.org/x/tools/cover golang.org/x/tools/go/ast/inspector # gomodules.xyz/jsonpatch/v2 v2.4.0 ## explicit; go 1.20 @@ -731,13 +743,15 @@ google.golang.org/grpc/internal/status google.golang.org/grpc/resolver google.golang.org/grpc/serviceconfig google.golang.org/grpc/status -# google.golang.org/protobuf v1.32.0 +# google.golang.org/protobuf v1.33.0 ## explicit; go 1.17 +google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt google.golang.org/protobuf/internal/descopts google.golang.org/protobuf/internal/detrand +google.golang.org/protobuf/internal/editiondefaults google.golang.org/protobuf/internal/encoding/defval google.golang.org/protobuf/internal/encoding/messageset google.golang.org/protobuf/internal/encoding/tag @@ -760,6 +774,7 @@ google.golang.org/protobuf/reflect/protoregistry google.golang.org/protobuf/runtime/protoiface google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb +google.golang.org/protobuf/types/gofeaturespb google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/timestamppb @@ -775,10 +790,9 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# helm.sh/helm/v3 v3.13.3 -## explicit; go 1.19 +# helm.sh/helm/v3 v3.14.2 +## explicit; go 1.21 helm.sh/helm/v3/internal/fileutil -helm.sh/helm/v3/internal/ignore 
helm.sh/helm/v3/internal/resolver helm.sh/helm/v3/internal/sympath helm.sh/helm/v3/internal/third_party/dep/fs @@ -796,6 +810,7 @@ helm.sh/helm/v3/pkg/engine helm.sh/helm/v3/pkg/getter helm.sh/helm/v3/pkg/helmpath helm.sh/helm/v3/pkg/helmpath/xdg +helm.sh/helm/v3/pkg/ignore helm.sh/helm/v3/pkg/kube helm.sh/helm/v3/pkg/kube/fake helm.sh/helm/v3/pkg/lint @@ -814,7 +829,7 @@ helm.sh/helm/v3/pkg/storage/driver helm.sh/helm/v3/pkg/strvals helm.sh/helm/v3/pkg/time helm.sh/helm/v3/pkg/uploader -# k8s.io/api v0.29.1 +# k8s.io/api v0.29.3 ## explicit; go 1.21 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -871,7 +886,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.29.1 +# k8s.io/apiextensions-apiserver v0.29.3 ## explicit; go 1.21 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -882,7 +897,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.29.1 +# k8s.io/apimachinery v0.29.3 ## explicit; go 1.21 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -943,7 +958,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.29.1 +# k8s.io/apiserver v0.29.3 ## explicit; go 1.21 k8s.io/apiserver/pkg/endpoints/deprecation # k8s.io/cli-runtime v0.29.1 @@ -952,7 +967,7 @@ k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.29.1 +# k8s.io/client-go v0.29.3 ## explicit; go 1.21 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1114,7 +1129,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.29.1 +# k8s.io/component-base v0.29.3 ## explicit; go 1.21 k8s.io/component-base/config k8s.io/component-base/config/v1alpha1 @@ -1178,7 +1193,7 @@ oras.land/oras-go/pkg/registry/remote/auth oras.land/oras-go/pkg/registry/remote/internal/errutil oras.land/oras-go/pkg/registry/remote/internal/syncutil oras.land/oras-go/pkg/target -# sigs.k8s.io/controller-runtime v0.17.1 +# sigs.k8s.io/controller-runtime v0.17.3 ## explicit; go 1.21 sigs.k8s.io/controller-runtime sigs.k8s.io/controller-runtime/pkg/builder diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index 1cecf88e5..e7f2945f1 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "net/http" + "sort" "time" "golang.org/x/exp/maps" @@ -421,7 +422,12 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { for namespace, cfg := range opts.DefaultNamespaces { cfg = defaultConfig(cfg, optionDefaultsToConfig(&opts)) if namespace == metav1.NamespaceAll { - cfg.FieldSelector = 
fields.AndSelectors( + appendIfNotNil( + namespaceAllSelector(maps.Keys(opts.DefaultNamespaces)), + cfg.FieldSelector, + )..., + ) } opts.DefaultNamespaces[namespace] = cfg } @@ -435,7 +441,12 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { return opts, fmt.Errorf("type %T is not namespaced, but its ByObject.Namespaces setting is not nil", obj) } - // Default the namespace-level configs first, because they need to use the undefaulted type-level config. + if isNamespaced && byObject.Namespaces == nil { + byObject.Namespaces = maps.Clone(opts.DefaultNamespaces) + } + + // Default the namespace-level configs first, because they need to use the undefaulted type-level config + // to be able to potentially fall through to settings from DefaultNamespaces. for namespace, config := range byObject.Namespaces { // 1. Default from the undefaulted type-level config config = defaultConfig(config, byObjectToConfig(byObject)) @@ -461,14 +472,14 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { byObject.Namespaces[namespace] = config } - defaultedConfig := defaultConfig(byObjectToConfig(byObject), optionDefaultsToConfig(&opts)) - byObject.Label = defaultedConfig.LabelSelector - byObject.Field = defaultedConfig.FieldSelector - byObject.Transform = defaultedConfig.Transform - byObject.UnsafeDisableDeepCopy = defaultedConfig.UnsafeDisableDeepCopy - - if isNamespaced && byObject.Namespaces == nil { - byObject.Namespaces = opts.DefaultNamespaces + // Only default ByObject itself if it isn't namespaced or has no namespaces configured, as only + // then any of this will be honored. + if !isNamespaced || len(byObject.Namespaces) == 0 { + defaultedConfig := defaultConfig(byObjectToConfig(byObject), optionDefaultsToConfig(&opts)) + byObject.Label = defaultedConfig.LabelSelector + byObject.Field = defaultedConfig.FieldSelector + byObject.Transform = defaultedConfig.Transform + byObject.UnsafeDisableDeepCopy = defaultedConfig.UnsafeDisableDeepCopy } opts.ByObject[obj] = byObject @@ -498,20 +509,21 @@ func defaultConfig(toDefault, defaultFrom Config) Config { return toDefault } -func namespaceAllSelector(namespaces []string) fields.Selector { +func namespaceAllSelector(namespaces []string) []fields.Selector { selectors := make([]fields.Selector, 0, len(namespaces)-1) + sort.Strings(namespaces) for _, namespace := range namespaces { if namespace != metav1.NamespaceAll { selectors = append(selectors, fields.OneTermNotEqualSelector("metadata.namespace", namespace)) } } - return fields.AndSelectors(selectors...) + return selectors } -func appendIfNotNil[T comparable](a, b T) []T { +func appendIfNotNil[T comparable](a []T, b T) []T { if b != *new(T) { - return []T{a, b} + return append(a, b) } - return []T{a} + return a } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go index 24af330c8..927be22b4 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go @@ -53,7 +53,7 @@ func NewDynamicRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTM // client for discovery information to do REST mappings.
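
Aside from sorting namespaces for determinism, the cache change above also reshapes the appendIfNotNil helper from pairing two values into appending to a slice. A standalone demonstration of the generic zero-value test it relies on:

package main

import "fmt"

// appendIfNotNil appends b only when it is not the zero value of its type;
// *new(T) is an idiomatic way to spell that zero value for a type parameter.
func appendIfNotNil[T comparable](a []T, b T) []T {
	if b != *new(T) {
		return append(a, b)
	}
	return a
}

func main() {
	fmt.Println(appendIfNotNil([]string{"x"}, "y")) // [x y]
	fmt.Println(appendIfNotNil([]string{"x"}, ""))  // [x]
}
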
type mapper struct { mapper meta.RESTMapper - client *discovery.DiscoveryClient + client discovery.DiscoveryInterface knownGroups map[string]*restmapper.APIGroupResources apiGroups map[string]*metav1.APIGroup @@ -280,11 +280,15 @@ func (m *mapper) fetchGroupVersionResourcesLocked(groupName string, versions ... groupVersion := schema.GroupVersion{Group: groupName, Version: version} apiResourceList, err := m.client.ServerResourcesForGroupVersion(groupVersion.String()) - if apierrors.IsNotFound(err) && m.isGroupVersionCached(groupVersion) { + if apierrors.IsNotFound(err) { // If the version is not found, we remove the group from the cache // so it gets refreshed on the next call. - delete(m.apiGroups, groupName) - delete(m.knownGroups, groupName) + if m.isAPIGroupCached(groupVersion) { + delete(m.apiGroups, groupName) + } + if m.isGroupVersionCached(groupVersion) { + delete(m.knownGroups, groupName) + } continue } else if err != nil { failedGroups[groupVersion] = err @@ -313,3 +317,19 @@ func (m *mapper) isGroupVersionCached(gv schema.GroupVersion) bool { return false } + +// isAPIGroupCached checks if a version for a group is cached in the api groups cache. +func (m *mapper) isAPIGroupCached(gv schema.GroupVersion) bool { + cachedGroup, ok := m.apiGroups[gv.Group] + if !ok { + return false + } + + for _, version := range cachedGroup.Versions { + if version.Version == gv.Version { + return true + } + } + + return false +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go index 790a1faab..b90a6ebb8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go @@ -947,7 +947,7 @@ func dryPatch(action testing.PatchActionImpl, tracker testing.ObjectTracker) (ru if err := json.Unmarshal(modified, obj); err != nil { return nil, err } - case types.StrategicMergePatchType, types.ApplyPatchType: + case types.StrategicMergePatchType: mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) if err != nil { return nil, err @@ -955,8 +955,10 @@ func dryPatch(action testing.PatchActionImpl, tracker testing.ObjectTracker) (ru if err = json.Unmarshal(mergedByte, obj); err != nil { return nil, err } + case types.ApplyPatchType: + return nil, errors.New("apply patches are not supported in the fake client. Follow https://github.com/kubernetes/kubernetes/issues/115598 for the current status") default: - return nil, fmt.Errorf("PatchType is not supported") + return nil, fmt.Errorf("%s PatchType is not supported", action.GetPatchType()) } return obj, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index a16f354a1..fdb9d982d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -518,6 +518,8 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e // Stop all the leader election runnables, which includes reconcilers. 
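
The manager shutdown fix that follows relies on a small trick: consuming a sync.Once with a no-op so a later Start cannot fire on a non-elected manager. A standalone sketch of the idea:

package main

import (
	"fmt"
	"sync"
)

type group struct {
	startOnce sync.Once
}

func (g *group) Start() {
	g.startOnce.Do(func() { fmt.Println("started") })
}

func (g *group) StopAndWait() {
	// "Burn" the Once with a no-op: any subsequent Start becomes inert.
	g.startOnce.Do(func() {})
	fmt.Println("stopped")
}

func main() {
	g := &group{}
	g.StopAndWait() // prints "stopped"
	g.Start()       // prints nothing; the Once has already been consumed
}
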
cm.logger.Info("Stopping and waiting for leader election runnables") + // Prevent leader election when shutting down a non-elected manager + cm.runnables.LeaderElection.startOnce.Do(func() {}) cm.runnables.LeaderElection.StopAndWait(cm.shutdownCtx) // Stop the caches before the leader election runnables, this is an important diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go index 96566f5df..606091048 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go @@ -263,6 +263,15 @@ func (r *runnableGroup) Add(rn Runnable, ready runnableCheck) error { r.start.Unlock() } + // Recheck if we're stopped and hold the readlock, given that the stop and start can be called + // at the same time, we can end up in a situation where the runnable is added + // after the group is stopped and the channel is closed. + r.stop.RLock() + defer r.stop.RUnlock() + if r.stopped { + return errRunnableGroupStopped + } + // Enqueue the runnable. r.ch <- readyRunnable return nil @@ -272,7 +281,11 @@ func (r *runnableGroup) Add(rn Runnable, ready runnableCheck) error { func (r *runnableGroup) StopAndWait(ctx context.Context) { r.stopOnce.Do(func() { // Close the reconciler channel once we're done. - defer close(r.ch) + defer func() { + r.stop.Lock() + close(r.ch) + r.stop.Unlock() + }() _ = r.Start(ctx) r.stop.Lock() From 70638697468ddbf76e99d158a12a7d93ac446fd5 Mon Sep 17 00:00:00 2001 From: Tariq Ibrahim Date: Fri, 12 Apr 2024 14:10:30 -0700 Subject: [PATCH 016/293] bump openshift and related go deps Signed-off-by: Tariq Ibrahim --- go.mod | 46 +- go.sum | 91 +- .../emicklei/go-restful/v3/CHANGES.md | 4 - .../go-restful/v3/entity_accessors.go | 12 +- .../github.com/go-openapi/swag/BENCHMARK.md | 52 + .../go-openapi/swag/initialism_index.go | 137 ++ .../github.com/go-openapi/swag/name_lexem.go | 70 +- vendor/github.com/go-openapi/swag/split.go | 470 +++- .../go-openapi/swag/string_bytes.go | 22 + vendor/github.com/go-openapi/swag/util.go | 198 +- ...rsion-operator_01_clusteroperator.crd.yaml | 167 -- ...01_clusterversion-CustomNoUpgrade.crd.yaml | 780 ------ ...perator_01_clusterversion-Default.crd.yaml | 727 ------ ...usterversion-TechPreviewNoUpgrade.crd.yaml | 780 ------ .../0000_03_config-operator_01_proxy.crd.yaml | 106 - ...rketplace-operator_01_operatorhub.crd.yaml | 109 - ...ator_01_apiserver-CustomNoUpgrade.crd.yaml | 315 --- ...fig-operator_01_apiserver-Default.crd.yaml | 315 --- ...01_apiserver-TechPreviewNoUpgrade.crd.yaml | 315 --- ...01_authentication.crd-CustomNoUpgrade.yaml | 554 ----- ...authentication.crd-Default-Hypershift.yaml | 552 ----- ...tication.crd-Default-Hypershift.yaml-patch | 285 --- ...perator_01_authentication.crd-Default.yaml | 171 -- ...thentication.crd-TechPreviewNoUpgrade.yaml | 555 ----- ...000_10_config-operator_01_console.crd.yaml | 75 - ...g-operator_01_dns-CustomNoUpgrade.crd.yaml | 159 -- ...10_config-operator_01_dns-Default.crd.yaml | 159 -- ...rator_01_dns-TechPreviewNoUpgrade.crd.yaml | 159 -- ...10_config-operator_01_featuregate.crd.yaml | 213 -- .../0000_10_config-operator_01_image.crd.yaml | 162 -- ...ig-operator_01_imagecontentpolicy.crd.yaml | 112 - ...-operator_01_imagedigestmirrorset.crd.yaml | 141 -- ...fig-operator_01_imagetagmirrorset.crd.yaml | 144 -- ...01_infrastructure-CustomNoUpgrade.crd.yaml | 2089 ----------------- ...rastructure-CustomNoUpgrade.crd.yaml-patch | 
24 - ...perator_01_infrastructure-Default.crd.yaml | 1761 -------------- ...r_01_infrastructure-Default.crd.yaml-patch | 24 - ...frastructure-TechPreviewNoUpgrade.crd.yaml | 2089 ----------------- ...ucture-TechPreviewNoUpgrade.crd.yaml-patch | 24 - ...000_10_config-operator_01_ingress.crd.yaml | 553 ----- ...erator_01_network-CustomNoUpgrade.crd.yaml | 284 --- ...onfig-operator_01_network-Default.crd.yaml | 284 --- ...r_01_network-TechPreviewNoUpgrade.crd.yaml | 284 --- .../0000_10_config-operator_01_node.crd.yaml | 66 - .../0000_10_config-operator_01_oauth.crd.yaml | 698 ------ ...000_10_config-operator_01_project.crd.yaml | 68 - ...ator_01_scheduler-CustomNoUpgrade.crd.yaml | 130 - ...fig-operator_01_scheduler-Default.crd.yaml | 109 - ...01_scheduler-TechPreviewNoUpgrade.crd.yaml | 130 - ...troller-manager-operator_01_build.crd.yaml | 431 ---- .../config/v1/custom.apiserver.testsuite.yaml | 35 - .../v1/custom.authentication.testsuite.yaml | 284 --- .../v1/custom.clusterversion.testsuite.yaml | 472 ---- .../api/config/v1/custom.dns.testsuite.yaml | 104 - .../v1/custom.infrastructure.testsuite.yaml | 321 --- .../config/v1/custom.network.testsuite.yaml | 28 - .../config/v1/custom.scheduler.testsuite.yaml | 14 - .../github.com/openshift/api/config/v1/doc.go | 1 + .../openshift/api/config/v1/feature_gates.go | 942 ++++---- .../config/v1/stable.apiserver.testsuite.yaml | 36 - .../v1/stable.authentication.testsuite.yaml | 21 - .../api/config/v1/stable.build.testsuite.yaml | 14 - .../v1/stable.clusteroperator.testsuite.yaml | 14 - .../v1/stable.clusterversion.testsuite.yaml | 418 ---- .../config/v1/stable.console.testsuite.yaml | 14 - .../api/config/v1/stable.dns.testsuite.yaml | 105 - .../v1/stable.featuregate.testsuite.yaml | 14 - ...e.hypershift.authentication.testsuite.yaml | 298 --- .../api/config/v1/stable.image.testsuite.yaml | 14 - .../stable.imagecontentpolicy.testsuite.yaml | 14 - ...stable.imagedigestmirrorset.testsuite.yaml | 14 - .../stable.imagetagmirrorset.testsuite.yaml | 14 - .../v1/stable.infrastructure.testsuite.yaml | 1262 ---------- .../config/v1/stable.ingress.testsuite.yaml | 14 - .../config/v1/stable.network.testsuite.yaml | 37 - .../api/config/v1/stable.node.testsuite.yaml | 14 - .../api/config/v1/stable.oauth.testsuite.yaml | 14 - .../v1/stable.operatorhub.testsuite.yaml | 14 - .../config/v1/stable.project.testsuite.yaml | 14 - .../api/config/v1/stable.proxy.testsuite.yaml | 14 - .../config/v1/stable.scheduler.testsuite.yaml | 14 - .../v1/techpreview.apiserver.testsuite.yaml | 35 - .../techpreview.authentication.testsuite.yaml | 298 --- .../techpreview.clusterversion.testsuite.yaml | 472 ---- .../config/v1/techpreview.dns.testsuite.yaml | 14 - .../techpreview.infrastructure.testsuite.yaml | 749 ------ .../v1/techpreview.network.testsuite.yaml | 28 - .../v1/techpreview.scheduler.testsuite.yaml | 14 - .../api/config/v1/types_apiserver.go | 5 + .../api/config/v1/types_authentication.go | 16 +- .../openshift/api/config/v1/types_build.go | 6 + .../api/config/v1/types_cluster_operator.go | 11 + .../api/config/v1/types_cluster_version.go | 13 +- .../openshift/api/config/v1/types_console.go | 5 + .../openshift/api/config/v1/types_dns.go | 5 + .../openshift/api/config/v1/types_feature.go | 180 +- .../openshift/api/config/v1/types_image.go | 5 + .../config/v1/types_image_content_policy.go | 5 + .../v1/types_image_digest_mirror_set.go | 5 + .../config/v1/types_image_tag_mirror_set.go | 5 + .../api/config/v1/types_infrastructure.go | 24 +- 
.../openshift/api/config/v1/types_ingress.go | 33 +- .../openshift/api/config/v1/types_network.go | 113 +- .../openshift/api/config/v1/types_node.go | 3 + .../openshift/api/config/v1/types_oauth.go | 5 + .../api/config/v1/types_operatorhub.go | 5 + .../openshift/api/config/v1/types_project.go | 5 + .../openshift/api/config/v1/types_proxy.go | 5 + .../api/config/v1/types_scheduling.go | 7 +- .../api/config/v1/types_testreporting.go | 46 + .../api/config/v1/zz_generated.deepcopy.go | 175 ++ ..._generated.featuregated-crd-manifests.yaml | 490 ++++ .../v1/zz_generated.swagger_doc_generated.go | 68 +- ...or_01_backup-TechPreviewNoUpgrade.crd.yaml | 142 -- ...lusterimagepolicy-CustomNoUpgrade.crd.yaml | 398 ---- ...rimagepolicy-TechPreviewNoUpgrade.crd.yaml | 398 ---- ...or_01_imagepolicy-CustomNoUpgrade.crd.yaml | 398 ---- ..._imagepolicy-TechPreviewNoUpgrade.crd.yaml | 398 ---- ...ig-operator_01_insightsdatagather.crd.yaml | 88 - .../custom.clusterimagepolicy.testsuite.yaml | 451 ---- .../custom.imagepolicy.testsuite.yaml | 451 ---- .../techpreview.backup.testsuite.yaml | 202 -- ...hpreview.clusterimagepolicy.testsuite.yaml | 451 ---- .../techpreview.imagepolicy.testsuite.yaml | 451 ---- ...hpreview.insightsdatagather.testsuite.yaml | 14 - .../api/config/v1alpha1/types_backup.go | 6 + .../v1alpha1/types_cluster_image_policy.go | 6 + .../api/config/v1alpha1/types_image_policy.go | 6 + .../api/config/v1alpha1/types_insights.go | 6 + ..._generated.featuregated-crd-manifests.yaml | 92 + ...0000_03_security-openshift_01_scc.crd.yaml | 365 --- .../openshift/api/security/v1/generated.proto | 22 +- ....securitycontextconstraints.testsuite.yaml | 36 - .../openshift/api/security/v1/types.go | 22 +- ..._generated.featuregated-crd-manifests.yaml | 58 + .../config/v1/networkdiagnostics.go | 45 + .../v1/networkdiagnosticssourceplacement.go | 44 + .../v1/networkdiagnosticstargetplacement.go | 44 + .../config/v1/networkspec.go | 9 + .../applyconfigurations/internal/internal.go | 47 + .../operators/v1alpha1/catalogsource_types.go | 39 +- .../v1alpha1/clusterserviceversion.go | 15 +- .../operators/v1alpha1/subscription_types.go | 20 + .../v1alpha1/zz_generated.deepcopy.go | 33 +- .../pkg/apis/monitoring/register.go | 8 +- .../pkg/apis/monitoring/resource.go | 60 + .../apis/monitoring/v1/alertmanager_types.go | 100 +- .../apis/monitoring/v1/podmonitor_types.go | 256 +- .../pkg/apis/monitoring/v1/probe_types.go | 45 +- .../apis/monitoring/v1/prometheus_types.go | 1560 +++++++++--- .../monitoring/v1/prometheusrule_types.go | 10 +- .../monitoring/v1/servicemonitor_types.go | 115 +- .../pkg/apis/monitoring/v1/thanos_types.go | 42 +- .../pkg/apis/monitoring/v1/types.go | 353 ++- .../monitoring/v1/zz_generated.deepcopy.go | 770 +++++- .../prometheus/client_model/go/metrics.pb.go | 195 +- .../net/http/otelhttp/common.go | 20 +- .../net/http/otelhttp/handler.go | 40 +- .../otelhttp/internal/semconvutil/httpconv.go | 273 ++- .../otelhttp/internal/semconvutil/netconv.go | 175 +- .../net/http/otelhttp/transport.go | 109 +- .../net/http/otelhttp/version.go | 2 +- vendor/go.opentelemetry.io/otel/CHANGELOG.md | 100 +- vendor/go.opentelemetry.io/otel/CODEOWNERS | 2 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 23 +- vendor/go.opentelemetry.io/otel/README.md | 2 +- vendor/go.opentelemetry.io/otel/RELEASING.md | 6 +- .../go.opentelemetry.io/otel/attribute/set.go | 89 +- .../otel/baggage/baggage.go | 330 ++- vendor/go.opentelemetry.io/otel/doc.go | 2 +- .../otel/propagation/trace_context.go | 94 +- 
.../otel/semconv/v1.20.0/attribute_group.go | 1209 ++++++++++ .../otel/semconv/{v1.17.0 => v1.20.0}/doc.go | 4 +- .../semconv/{v1.17.0 => v1.20.0}/event.go | 2 +- .../semconv/{v1.17.0 => v1.20.0}/exception.go | 2 +- .../otel/semconv/{v1.17.0 => v1.20.0}/http.go | 2 +- .../semconv/{v1.17.0 => v1.20.0}/resource.go | 297 ++- .../semconv/{v1.17.0 => v1.20.0}/schema.go | 4 +- .../semconv/{v1.17.0 => v1.20.0}/trace.go | 1733 ++++---------- .../otel/semconv/v1.21.0/doc.go | 4 +- .../otel/trace/tracestate.go | 197 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 16 +- vendor/golang.org/x/exp/slices/slices.go | 44 +- vendor/golang.org/x/net/html/token.go | 12 +- vendor/golang.org/x/net/http2/frame.go | 11 +- vendor/golang.org/x/net/http2/transport.go | 9 + .../grpc/internal/internal.go | 20 +- ...ive_nonunix.go => tcp_keepalive_others.go} | 2 +- .../grpc/internal/tcp_keepalive_windows.go | 54 + .../grpc/resolver/resolver.go | 10 + vendor/k8s.io/klog/v2/OWNERS | 4 +- vendor/k8s.io/klog/v2/contextual_slog.go | 31 + vendor/k8s.io/klog/v2/klog.go | 23 +- vendor/k8s.io/klog/v2/klogr_slog.go | 10 +- vendor/k8s.io/klog/v2/safeptr.go | 34 + vendor/modules.txt | 54 +- 197 files changed, 8677 insertions(+), 29294 deletions(-) create mode 100644 vendor/github.com/go-openapi/swag/BENCHMARK.md create mode 100644 vendor/github.com/go-openapi/swag/string_bytes.go delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-CustomNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-Default.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-TechPreviewNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_03_marketplace-operator_01_operatorhub.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default-Hypershift.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default-Hypershift.yaml-patch delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-CustomNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-Default.crd.yaml delete mode 100644 
vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-TechPreviewNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagedigestmirrorset.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagetagmirrorset.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml-patch delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml-patch delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml-patch delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-CustomNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-Default.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-TechPreviewNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_node.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-CustomNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-Default.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-TechPreviewNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_openshift-controller-manager-operator_01_build.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/custom.apiserver.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/custom.authentication.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/custom.clusterversion.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/custom.dns.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/custom.infrastructure.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/custom.network.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/custom.scheduler.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.apiserver.testsuite.yaml delete mode 100644 
vendor/github.com/openshift/api/config/v1/stable.authentication.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.build.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.clusteroperator.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.clusterversion.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.console.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.dns.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.featuregate.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.hypershift.authentication.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.image.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.imagecontentpolicy.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.imagedigestmirrorset.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.imagetagmirrorset.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.infrastructure.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.ingress.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.network.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.node.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.oauth.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.operatorhub.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.project.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.proxy.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/stable.scheduler.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/techpreview.apiserver.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/techpreview.authentication.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/techpreview.clusterversion.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/techpreview.dns.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/techpreview.infrastructure.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/techpreview.network.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/techpreview.scheduler.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/types_testreporting.go create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_backup-TechPreviewNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_clusterimagepolicy-CustomNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_clusterimagepolicy-TechPreviewNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_imagepolicy-CustomNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_imagepolicy-TechPreviewNoUpgrade.crd.yaml delete mode 100644 
vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_insightsdatagather.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1alpha1/custom.clusterimagepolicy.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1alpha1/custom.imagepolicy.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1alpha1/techpreview.backup.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1alpha1/techpreview.clusterimagepolicy.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1alpha1/techpreview.imagepolicy.testsuite.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1alpha1/techpreview.insightsdatagather.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml delete mode 100644 vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml delete mode 100644 vendor/github.com/openshift/api/security/v1/stable.securitycontextconstraints.testsuite.yaml create mode 100644 vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go rename vendor/go.opentelemetry.io/otel/semconv/{v1.17.0 => v1.20.0}/doc.go (85%) rename vendor/go.opentelemetry.io/otel/semconv/{v1.17.0 => v1.20.0}/event.go (99%) rename vendor/go.opentelemetry.io/otel/semconv/{v1.17.0 => v1.20.0}/exception.go (91%) rename vendor/go.opentelemetry.io/otel/semconv/{v1.17.0 => v1.20.0}/http.go (91%) rename vendor/go.opentelemetry.io/otel/semconv/{v1.17.0 => v1.20.0}/resource.go (92%) rename vendor/go.opentelemetry.io/otel/semconv/{v1.17.0 => v1.20.0}/schema.go (86%) rename vendor/go.opentelemetry.io/otel/semconv/{v1.17.0 => v1.20.0}/trace.go (63%) rename vendor/google.golang.org/grpc/internal/{tcp_keepalive_nonunix.go => tcp_keepalive_others.go} (96%) create mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go create mode 100644 vendor/k8s.io/klog/v2/contextual_slog.go create mode 100644 vendor/k8s.io/klog/v2/safeptr.go diff --git a/go.mod b/go.mod index c9ff8b672..df9990a40 100644 --- a/go.mod +++ b/go.mod @@ -8,16 +8,16 @@ require ( github.com/NVIDIA/k8s-kata-manager v0.0.0-20230620232711-08b57feb9b5a github.com/NVIDIA/k8s-operator-libs v0.0.0-20240214071211-ea58a3ada15c github.com/NVIDIA/nvidia-container-toolkit v1.14.6 - github.com/davecgh/go-spew v1.1.1 + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/go-logr/logr v1.4.1 github.com/mitchellh/hashstructure v1.1.0 github.com/mittwald/go-helm-client v0.12.9 github.com/onsi/ginkgo/v2 v2.17.1 github.com/onsi/gomega v1.32.0 - github.com/openshift/api v0.0.0-20240306072808-610cbc77dbab - github.com/openshift/client-go v0.0.0-20240215090359-b71f6f2731f5 - github.com/operator-framework/api v0.17.6 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.65.2 + github.com/openshift/api v0.0.0-20240412130237-e2b0b690b638 + 
github.com/openshift/client-go v0.0.0-20240408153607-64bd6feb83ae + github.com/operator-framework/api v0.23.0 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.73.1 github.com/prometheus/client_golang v1.19.0 github.com/regclient/regclient v0.6.0 github.com/sirupsen/logrus v1.9.3 @@ -29,7 +29,7 @@ require ( k8s.io/apiextensions-apiserver v0.29.3 k8s.io/apimachinery v0.29.3 k8s.io/client-go v0.29.3 - k8s.io/klog/v2 v2.110.1 + k8s.io/klog/v2 v2.120.1 sigs.k8s.io/controller-runtime v0.17.3 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd sigs.k8s.io/yaml v1.4.0 @@ -62,7 +62,7 @@ require ( github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect - github.com/emicklei/go-restful/v3 v3.11.1 // indirect + github.com/emicklei/go-restful/v3 v3.11.2 // indirect github.com/evanphx/json-patch v5.7.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.8.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect @@ -75,7 +75,7 @@ require ( github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect - github.com/go-openapi/swag v0.22.7 // indirect + github.com/go-openapi/swag v0.22.9 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -125,8 +125,8 @@ require ( github.com/opencontainers/image-spec v1.1.0-rc5 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_model v0.6.0 // indirect github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/rivo/uniseg v0.4.4 // indirect @@ -143,26 +143,26 @@ require ( github.com/xlab/treeprint v1.2.0 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20230818092907-09424fdc8884 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.21.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect + go.opentelemetry.io/otel v1.23.1 // indirect + go.opentelemetry.io/otel/metric v1.23.1 // indirect + go.opentelemetry.io/otel/trace v1.23.1 // indirect go.starlark.net v0.0.0-20231121155337-90ade8b19d09 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.18.0 // indirect - golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc // indirect - golang.org/x/net v0.20.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect + golang.org/x/net v0.22.0 // indirect + golang.org/x/oauth2 v0.17.0 // indirect golang.org/x/sync v0.6.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.17.0 // indirect + 
golang.org/x/tools v0.18.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/grpc v1.60.1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c // indirect + google.golang.org/grpc v1.61.1 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/evanphx/json-patch.v5 v5.7.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -172,9 +172,9 @@ require ( k8s.io/apiserver v0.29.3 // indirect k8s.io/cli-runtime v0.29.1 // indirect k8s.io/component-base v0.29.3 // indirect - k8s.io/kube-openapi v0.0.0-20240103195357-a9f8850cb432 // indirect + k8s.io/kube-openapi v0.0.0-20240221221325-2ac9dc51f3f1 // indirect k8s.io/kubectl v0.29.1 // indirect - k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect + k8s.io/utils v0.0.0-20240310230437-4693a0247e57 // indirect oras.land/oras-go v1.2.4 // indirect sigs.k8s.io/kustomize/api v0.16.0 // indirect sigs.k8s.io/kustomize/kyaml v0.16.0 // indirect diff --git a/go.sum b/go.sum index 471db6a74..e325bdcba 100644 --- a/go.sum +++ b/go.sum @@ -73,8 +73,9 @@ github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= @@ -97,8 +98,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/emicklei/go-restful/v3 v3.11.1 h1:S+9bSbua1z3FgCnV0KKOSSZ3mDthb5NyEPL5gEpCvyk= -github.com/emicklei/go-restful/v3 v3.11.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU= +github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= @@ -123,7 +124,6 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-logfmt/logfmt 
v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -134,8 +134,8 @@ github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbX github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= -github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8= -github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -292,12 +292,12 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= -github.com/openshift/api v0.0.0-20240306072808-610cbc77dbab h1:L3k198pZJhluvJZzD/ySpkKqJzQh5MgWlsT4U6NOYUY= -github.com/openshift/api v0.0.0-20240306072808-610cbc77dbab/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= -github.com/openshift/client-go v0.0.0-20240215090359-b71f6f2731f5 h1:iQ2Y1LUX7FbBm6ddaSVz/KeWXUMkqrBP/C5yt0DvBgI= -github.com/openshift/client-go v0.0.0-20240215090359-b71f6f2731f5/go.mod h1:Y5Hp789dTrF6Fq8cA5YQlpwffmlLy8mc2un/CY0cg7Q= -github.com/operator-framework/api v0.17.6 h1:E6+vlvYUKafvoXYtCuHlDZrXX4vl8AT+r93OxNlzjpU= -github.com/operator-framework/api v0.17.6/go.mod h1:l/cuwtPxkVUY7fzYgdust2m9tlmb8I4pOvbsUufRb24= +github.com/openshift/api v0.0.0-20240412130237-e2b0b690b638 h1://6BunjFcTaoaWD9IXRC2BynmIW9ag7k2ekGrUYJbzY= +github.com/openshift/api v0.0.0-20240412130237-e2b0b690b638/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= +github.com/openshift/client-go v0.0.0-20240408153607-64bd6feb83ae h1:WzEMf853apLG0ZgkfmKvYXBKBqhzx7nalP306yQP1ck= +github.com/openshift/client-go v0.0.0-20240408153607-64bd6feb83ae/go.mod h1:YOfx7b9ieudQJImuo95BcVzifosCrCGBErbO/w/njBU= +github.com/operator-framework/api v0.23.0 h1:kHymOwcHBpBVujT49SKOCd4EVG7Odwj4wl3NbOR2LLA= +github.com/operator-framework/api v0.23.0/go.mod h1:oKcFOz+Xc1UhMi2Pzcp6qsO7wjS4r+yP7EQprQBXrfM= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 
h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= @@ -305,12 +305,13 @@ github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rK github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.65.2 h1:DZzMjhqxx3+kAPpwWdng3ktO6NErh1wGuW5tXJamak8= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.65.2/go.mod h1:xcfWyzl4BpEe5jnVkw7D1yCHU7GHjfjCERJsEfGbpSU= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.73.1 h1:VvIIWSQbvGaDgIeTrOintF/czS6UpLB086EUslAm7aI= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.73.1/go.mod h1:yJ3CawR/A5qEYFEeCOUVYLTwYxmacfHQhJS+b/2QiaM= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= @@ -318,8 +319,8 @@ github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7km github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= @@ -394,14 +395,14 @@ gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20230818092907-09424fdc8884 h1:V0 gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20230818092907-09424fdc8884/go.mod h1:/x5Ky1ZJNyCjDkgSL1atII0EFKQF5WaIHKeP5nkaQfk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= -go.opentelemetry.io/otel v1.21.0 
h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA= +go.opentelemetry.io/otel v1.23.1 h1:Za4UzOqJYS+MUczKI320AtqZHZb7EqxO00jAHE0jmQY= +go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA= +go.opentelemetry.io/otel/metric v1.23.1 h1:PQJmqJ9u2QaJLBOELl1cxIdPcpbwzbkjfEyelTl2rlo= +go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI= +go.opentelemetry.io/otel/trace v1.23.1 h1:4LrmmEd8AU2rFvU1zegmvqW7+kWarxtNOPyeL6HmYY8= +go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI= go.starlark.net v0.0.0-20231121155337-90ade8b19d09 h1:hzy3LFnSN8kuQK8h9tHl4ndF6UruMj47OqwqsS+/Ai4= go.starlark.net v0.0.0-20231121155337-90ade8b19d09/go.mod h1:LcLNIzVOMp4oV+uusnpk+VU+SzXaJakUuBjoCSWH5dM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -416,10 +417,10 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM= -golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -434,10 +435,10 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= 
-golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -483,8 +484,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -493,10 +494,10 @@ gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c h1:NUsgEN92SQQqzfA+YtqYNqYmB3DMMYLlIwUZAQFVFbo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= +google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= +google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= @@ -536,14 +537,14 @@ k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= k8s.io/component-base v0.29.3 
h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo= k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20240103195357-a9f8850cb432 h1:+XYBQU3ZKUu60H6fEnkitTTabGoKfIG8zczhZBENu9o= -k8s.io/kube-openapi v0.0.0-20240103195357-a9f8850cb432/go.mod h1:Pa1PvrP7ACSkuX6I7KYomY6cmMA0Tx86waBhDUgoKPw= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240221221325-2ac9dc51f3f1 h1:rtdnaWfP40MTKv7izH81gkWpZB45pZrwIxyZdPSn1mI= +k8s.io/kube-openapi v0.0.0-20240221221325-2ac9dc51f3f1/go.mod h1:Pa1PvrP7ACSkuX6I7KYomY6cmMA0Tx86waBhDUgoKPw= k8s.io/kubectl v0.29.1 h1:rWnW3hi/rEUvvg7jp4iYB68qW5un/urKbv7fu3Vj0/s= k8s.io/kubectl v0.29.1/go.mod h1:SZzvLqtuOJYSvZzPZR9weSuP0wDQ+N37CENJf0FhDF4= -k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= -k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go v1.2.4 h1:djpBY2/2Cs1PV87GSJlxv4voajVOMZxqqtq9AB8YNvY= oras.land/oras-go v1.2.4/go.mod h1:DYcGfb3YF1nKjcezfX2SNlDAeQFKSXmf+qrFmrh4324= sigs.k8s.io/controller-runtime v0.17.3 h1:65QmN7r3FWgTxDMz9fvGnO1kbf2nu+acg9p2R9oYYYk= diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 97bb1a9d6..5edd5a7ca 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,9 +1,5 @@ # Change history of go-restful -## [v3.11.1] - 2024-01-03 - -- remove the dependency on github.com/json-iterator/go (#539) - ## [v3.11.0] - 2023-08-19 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. diff --git a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go index 8a35c9597..9808752ac 100644 --- a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go +++ b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go @@ -11,6 +11,12 @@ import ( "sync" ) +var ( + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) + // EntityReaderWriter can read and write values using an encoding such as JSON,XML. type EntityReaderWriter interface { // Read a serialized version of the value from the request. 
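A note on the go.sum hunks above: the test-dependency pins for github.com/davecgh/go-spew and github.com/pmezard/go-difflib move from tagged releases to pseudo-versions of the form vNEXT-0.TIMESTAMP-COMMIT, which pin an untagged commit while still ordering correctly under semver. A minimal sketch of where such a pseudo-version sorts, using golang.org/x/mod/semver (an assumption for illustration; that module is not part of this patch):

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	// Pseudo-version taken from the go-spew entry in the hunk above.
	pseudo := "v1.1.2-0.20180830191138-d8f796af33cc"

	// A pseudo-version is a pre-release of the next patch version, so it
	// sorts after the release it was cut from and before a real v1.1.2 tag.
	fmt.Println(semver.Compare(pseudo, "v1.1.1")) // 1
	fmt.Println(semver.Compare(pseudo, "v1.1.2")) // -1
}
```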
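The entity_accessors.go hunk above (and the two hunks that follow) replaces direct encoding/json calls with package-level function variables, creating a seam where a consumer can install an alternative JSON implementation without forking go-restful. A hedged sketch of the same pattern outside go-restful; the names and the decode helper are illustrative, not part of the patch:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
)

// Package-level seams in the style of the entity_accessors.go change:
// reassigning these variables swaps the JSON implementation everywhere
// the package uses them, with no caller-side changes.
var (
	MarshalIndent = json.MarshalIndent
	NewDecoder    = json.NewDecoder
)

// decode mirrors entityJSONAccess.Read: it goes through the seam rather
// than calling encoding/json directly.
func decode(r io.Reader, v any) error {
	dec := NewDecoder(r)
	dec.UseNumber()
	return dec.Decode(v)
}

func main() {
	var v map[string]any
	if err := decode(bytes.NewBufferString(`{"n": 1}`), &v); err != nil {
		panic(err)
	}
	out, _ := MarshalIndent(v, "", "  ")
	fmt.Println(string(out))
}
```

The tradeoff of this seam is mutable package state: swapping the variables is only safe at process start-up, before any concurrent use.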
@@ -128,7 +134,7 @@ type entityJSONAccess struct { // Read unmarshalls the value from JSON func (e entityJSONAccess) Read(req *Request, v interface{}) error { - decoder := json.NewDecoder(req.Request.Body) + decoder := NewDecoder(req.Request.Body) decoder.UseNumber() return decoder.Decode(v) } @@ -147,7 +153,7 @@ func writeJSON(resp *Response, status int, contentType string, v interface{}) er } if resp.prettyPrint { // pretty output must be created and written explicitly - output, err := json.MarshalIndent(v, "", " ") + output, err := MarshalIndent(v, "", " ") if err != nil { return err } @@ -159,5 +165,5 @@ func writeJSON(resp *Response, status int, contentType string, v interface{}) er // not-so-pretty resp.Header().Set(HEADER_ContentType, contentType) resp.WriteHeader(status) - return json.NewEncoder(resp).Encode(v) + return NewEncoder(resp).Encode(v) } diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md new file mode 100644 index 000000000..e7f28ed6b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md @@ -0,0 +1,52 @@ +# Benchmarks + +## Name mangling utilities + +```bash +go test -bench XXX -run XXX -benchtime 30s +``` + +### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op +BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op +BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op +``` + +### Benchmarks after PR #79 + +~ x10 performance improvement and ~ /100 memory allocations. + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op +``` + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op +``` diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go index 03555184d..2b2e46310 100644 --- a/vendor/github.com/go-openapi/swag/initialism_index.go +++ b/vendor/github.com/go-openapi/swag/initialism_index.go @@ -16,9 +16,130 @@ package swag import ( "sort" + "strings" "sync" ) +var ( + // commonInitialisms are common acronyms that are kept as whole uppercased words. 
+ commonInitialisms *indexOfInitialisms + + // initialisms is a slice of sorted initialisms + initialisms []string + + // a copy of initialisms pre-baked as []rune + initialismsRunes [][]rune + initialismsUpperCased [][]rune + + isInitialism func(string) bool + + maxAllocMatches int +) + +func init() { + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + configuredInitialisms := map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "IPv4": true, + "IPv6": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, + } + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) + maxAllocMatches = maxAllocHeuristic(initialismsRunes) + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +func asRunes(in []string) [][]rune { + out := make([][]rune, len(in)) + for i, initialism := range in { + out[i] = []rune(initialism) + } + + return out +} + +func asUpperCased(in []string) [][]rune { + out := make([][]rune, len(in)) + + for i, initialism := range in { + out[i] = []rune(upper(trim(initialism))) + } + + return out +} + +func maxAllocHeuristic(in [][]rune) int { + heuristic := make(map[rune]int) + for _, initialism := range in { + heuristic[initialism[0]]++ + } + + var maxAlloc int + for _, val := range heuristic { + if val > maxAlloc { + maxAlloc = val + } + } + + return maxAlloc +} + +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + // commonInitialisms[upper(word)] = true + commonInitialisms.add(upper(word)) + } + // sort again + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) +} + // indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. // Since go1.9, this may be implemented with sync.Map. 
type indexOfInitialisms struct { @@ -63,3 +184,19 @@ func (m *indexOfInitialisms) sorted() (result []string) { sort.Sort(sort.Reverse(byInitialism(result))) return } + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return strings.Compare(s[i], s[j]) > 0 +} diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go index aa7f6a9bb..8bb64ac32 100644 --- a/vendor/github.com/go-openapi/swag/name_lexem.go +++ b/vendor/github.com/go-openapi/swag/name_lexem.go @@ -14,74 +14,80 @@ package swag -import "unicode" +import ( + "unicode" + "unicode/utf8" +) type ( - nameLexem interface { - GetUnsafeGoName() string - GetOriginal() string - IsInitialism() bool - } + lexemKind uint8 - initialismNameLexem struct { + nameLexem struct { original string matchedInitialism string + kind lexemKind } +) - casualNameLexem struct { - original string - } +const ( + lexemKindCasualName lexemKind = iota + lexemKindInitialismName ) -func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem { - return &initialismNameLexem{ +func newInitialismNameLexem(original, matchedInitialism string) nameLexem { + return nameLexem{ + kind: lexemKindInitialismName, original: original, matchedInitialism: matchedInitialism, } } -func newCasualNameLexem(original string) *casualNameLexem { - return &casualNameLexem{ +func newCasualNameLexem(original string) nameLexem { + return nameLexem{ + kind: lexemKindCasualName, original: original, } } -func (l *initialismNameLexem) GetUnsafeGoName() string { - return l.matchedInitialism -} +func (l nameLexem) GetUnsafeGoName() string { + if l.kind == lexemKindInitialismName { + return l.matchedInitialism + } + + var ( + first rune + rest string + ) -func (l *casualNameLexem) GetUnsafeGoName() string { - var first rune - var rest string for i, orig := range l.original { if i == 0 { first = orig continue } + if i > 0 { rest = l.original[i:] break } } + if len(l.original) > 1 { - return string(unicode.ToUpper(first)) + lower(rest) + b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest)) + defer func() { + poolOfBuffers.RedeemBuffer(b) + }() + b.WriteRune(unicode.ToUpper(first)) + b.WriteString(lower(rest)) + return b.String() } return l.original } -func (l *initialismNameLexem) GetOriginal() string { +func (l nameLexem) GetOriginal() string { return l.original } -func (l *casualNameLexem) GetOriginal() string { - return l.original -} - -func (l *initialismNameLexem) IsInitialism() bool { - return true -} - -func (l *casualNameLexem) IsInitialism() bool { - return false +func (l nameLexem) IsInitialism() bool { + return l.kind == lexemKindInitialismName } diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go index a1825fb7d..274727a86 100644 --- a/vendor/github.com/go-openapi/swag/split.go +++ b/vendor/github.com/go-openapi/swag/split.go @@ -15,124 +15,269 @@ package swag import ( + "bytes" + "sync" "unicode" + "unicode/utf8" ) -var nameReplaceTable = map[rune]string{ - '@': "At ", - '&': "And ", - '|': "Pipe ", - '$': "Dollar ", - '!': "Bang ", - '-': "", - '_': "", -} - type ( splitter struct { - postSplitInitialismCheck bool initialisms []string + initialismsRunes [][]rune + initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased 
version + postSplitInitialismCheck bool + } + + splitterOption func(*splitter) + + initialismMatch struct { + body []rune + start, end int + complete bool + } + initialismMatches []initialismMatch +) + +type ( + // memory pools of temporary objects. + // + // These are used to recycle temporarily allocated objects + // and relieve the GC from undue pressure. + + matchesPool struct { + *sync.Pool } - splitterOption func(*splitter) *splitter + buffersPool struct { + *sync.Pool + } + + lexemsPool struct { + *sync.Pool + } + + splittersPool struct { + *sync.Pool + } ) -// split calls the splitter; splitter provides more control and post options +var ( + // poolOfMatches holds temporary slices for recycling during the initialism match process + poolOfMatches = matchesPool{ + Pool: &sync.Pool{ + New: func() any { + s := make(initialismMatches, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfBuffers = buffersPool{ + Pool: &sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } + + poolOfLexems = lexemsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]nameLexem, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfSplitters = splittersPool{ + Pool: &sync.Pool{ + New: func() any { + s := newSplitter() + + return &s + }, + }, + } +) + +// nameReplaceTable finds a word representation for special characters. +func nameReplaceTable(r rune) (string, bool) { + switch r { + case '@': + return "At ", true + case '&': + return "And ", true + case '|': + return "Pipe ", true + case '$': + return "Dollar ", true + case '!': + return "Bang ", true + case '-': + return "", true + case '_': + return "", true + default: + return "", false + } +} + +// split calls the splitter. +// +// Use newSplitter for more control and options func split(str string) []string { - lexems := newSplitter().split(str) - result := make([]string, 0, len(lexems)) + s := poolOfSplitters.BorrowSplitter() + lexems := s.split(str) + result := make([]string, 0, len(*lexems)) - for _, lexem := range lexems { + for _, lexem := range *lexems { result = append(result, lexem.GetOriginal()) } + poolOfLexems.RedeemLexems(lexems) + poolOfSplitters.RedeemSplitter(s) return result } -func (s *splitter) split(str string) []nameLexem { - return s.toNameLexems(str) -} - -func newSplitter(options ...splitterOption) *splitter { - splitter := &splitter{ +func newSplitter(options ...splitterOption) splitter { + s := splitter{ postSplitInitialismCheck: false, initialisms: initialisms, + initialismsRunes: initialismsRunes, + initialismsUpperCased: initialismsUpperCased, } for _, option := range options { - splitter = option(splitter) + option(&s) } - return splitter + return s } // withPostSplitInitialismCheck allows to catch initialisms after main split process -func withPostSplitInitialismCheck(s *splitter) *splitter { +func withPostSplitInitialismCheck(s *splitter) { s.postSplitInitialismCheck = true +} + +func (p matchesPool) BorrowMatches() *initialismMatches { + s := p.Get().(*initialismMatches) + *s = (*s)[:0] // reset slice, keep allocated capacity + return s } -type ( - initialismMatch struct { - start, end int - body []rune - complete bool +func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { + s := p.Get().(*bytes.Buffer) + s.Reset() + + if s.Cap() < size { + s.Grow(size) } - initialismMatches []*initialismMatch -) -func (s *splitter) toNameLexems(name string) []nameLexem { + return s +} + +func (p lexemsPool) BorrowLexems() *[]nameLexem { + s := p.Get().(*[]nameLexem) + *s = (*s)[:0] // reset slice, 
keep allocated capacity + + return s +} + +func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter { + s := p.Get().(*splitter) + s.postSplitInitialismCheck = false // reset options + for _, apply := range options { + apply(s) + } + + return s +} + +func (p matchesPool) RedeemMatches(s *initialismMatches) { + p.Put(s) +} + +func (p buffersPool) RedeemBuffer(s *bytes.Buffer) { + p.Put(s) +} + +func (p lexemsPool) RedeemLexems(s *[]nameLexem) { + p.Put(s) +} + +func (p splittersPool) RedeemSplitter(s *splitter) { + p.Put(s) +} + +func (m initialismMatch) isZero() bool { + return m.start == 0 && m.end == 0 +} + +func (s splitter) split(name string) *[]nameLexem { nameRunes := []rune(name) matches := s.gatherInitialismMatches(nameRunes) + if matches == nil { + return poolOfLexems.BorrowLexems() + } + return s.mapMatchesToNameLexems(nameRunes, matches) } -func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { - matches := make(initialismMatches, 0) +func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { + var matches *initialismMatches for currentRunePosition, currentRune := range nameRunes { - newMatches := make(initialismMatches, 0, len(matches)) + // recycle these allocations as we loop over runes + // with such recycling, only 2 slices should be allocated per call + // instead of o(n). + newMatches := poolOfMatches.BorrowMatches() // check current initialism matches - for _, match := range matches { - if keepCompleteMatch := match.complete; keepCompleteMatch { - newMatches = append(newMatches, match) - continue - } + if matches != nil { // skip first iteration + for _, match := range *matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + *newMatches = append(*newMatches, match) + continue + } - // drop failed match - currentMatchRune := match.body[currentRunePosition-match.start] - if !s.initialismRuneEqual(currentMatchRune, currentRune) { - continue - } + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if currentMatchRune != currentRune { + continue + } - // try to complete ongoing match - if currentRunePosition-match.start == len(match.body)-1 { - // we are close; the next step is to check the symbol ahead - // if it is a small letter, then it is not the end of match - // but beginning of the next word - - if currentRunePosition < len(nameRunes)-1 { - nextRune := nameRunes[currentRunePosition+1] - if newWord := unicode.IsLower(nextRune); newWord { - // oh ok, it was the start of a new word - continue + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } } + + match.complete = true + match.end = currentRunePosition } - match.complete = true - match.end = currentRunePosition + *newMatches = append(*newMatches, match) } - - newMatches = append(newMatches, match) } // check for new initialism matches - for _, initialism := range s.initialisms { - initialismRunes := []rune(initialism) - if s.initialismRuneEqual(initialismRunes[0], currentRune) { - newMatches = append(newMatches, &initialismMatch{ + for i := range s.initialisms { + initialismRunes := 
s.initialismsRunes[i] + if initialismRunes[0] == currentRune { + *newMatches = append(*newMatches, initialismMatch{ start: currentRunePosition, body: initialismRunes, complete: false, @@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { } } + if matches != nil { + poolOfMatches.RedeemMatches(matches) + } matches = newMatches } + // up to the caller to redeem this last slice return matches } -func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem { - nameLexems := make([]nameLexem, 0) +func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem { + nameLexems := poolOfLexems.BorrowLexems() - var lastAcceptedMatch *initialismMatch - for _, match := range matches { + var lastAcceptedMatch initialismMatch + for _, match := range *matches { if !match.complete { continue } - if firstMatch := lastAcceptedMatch == nil; firstMatch { - nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...) - nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + if firstMatch := lastAcceptedMatch.isZero(); firstMatch { + s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start]) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match @@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa } middle := nameRunes[lastAcceptedMatch.end+1 : match.start] - nameLexems = append(nameLexems, s.breakCasualString(middle)...) - nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + s.appendBrokenDownCasualString(nameLexems, middle) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match } // we have not found any accepted matches - if lastAcceptedMatch == nil { - return s.breakCasualString(nameRunes) - } - - if lastAcceptedMatch.end+1 != len(nameRunes) { + if lastAcceptedMatch.isZero() { + *nameLexems = (*nameLexems)[:0] + s.appendBrokenDownCasualString(nameLexems, nameRunes) + } else if lastAcceptedMatch.end+1 != len(nameRunes) { rest := nameRunes[lastAcceptedMatch.end+1:] - nameLexems = append(nameLexems, s.breakCasualString(rest)...) 
-func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem {
-	nameLexems := make([]nameLexem, 0)
+func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem {
+	nameLexems := poolOfLexems.BorrowLexems()
 
-	var lastAcceptedMatch *initialismMatch
-	for _, match := range matches {
+	var lastAcceptedMatch initialismMatch
+	for _, match := range *matches {
 		if !match.complete {
 			continue
 		}
 
-		if firstMatch := lastAcceptedMatch == nil; firstMatch {
-			nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...)
-			nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+		if firstMatch := lastAcceptedMatch.isZero(); firstMatch {
+			s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start])
+			*nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
 
 			lastAcceptedMatch = match
 
@@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa
 		}
 
 		middle := nameRunes[lastAcceptedMatch.end+1 : match.start]
-		nameLexems = append(nameLexems, s.breakCasualString(middle)...)
-		nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+		s.appendBrokenDownCasualString(nameLexems, middle)
+		*nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
 
 		lastAcceptedMatch = match
 	}
 
 	// we have not found any accepted matches
-	if lastAcceptedMatch == nil {
-		return s.breakCasualString(nameRunes)
-	}
-
-	if lastAcceptedMatch.end+1 != len(nameRunes) {
+	if lastAcceptedMatch.isZero() {
+		*nameLexems = (*nameLexems)[:0]
+		s.appendBrokenDownCasualString(nameLexems, nameRunes)
+	} else if lastAcceptedMatch.end+1 != len(nameRunes) {
 		rest := nameRunes[lastAcceptedMatch.end+1:]
-		nameLexems = append(nameLexems, s.breakCasualString(rest)...)
+		s.appendBrokenDownCasualString(nameLexems, rest)
 	}
 
-	return nameLexems
-}
+	poolOfMatches.RedeemMatches(matches)
 
-func (s *splitter) initialismRuneEqual(a, b rune) bool {
-	return a == b
+	return nameLexems
 }
 
-func (s *splitter) breakInitialism(original string) nameLexem {
+func (s splitter) breakInitialism(original string) nameLexem {
 	return newInitialismNameLexem(original, original)
 }
 
-func (s *splitter) breakCasualString(str []rune) []nameLexem {
-	segments := make([]nameLexem, 0)
-	currentSegment := ""
+func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) {
+	currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can be reused
+	defer func() {
+		poolOfBuffers.RedeemBuffer(currentSegment)
+	}()
 
 	addCasualNameLexem := func(original string) {
-		segments = append(segments, newCasualNameLexem(original))
+		*segments = append(*segments, newCasualNameLexem(original))
 	}
 
 	addInitialismNameLexem := func(original, match string) {
-		segments = append(segments, newInitialismNameLexem(original, match))
+		*segments = append(*segments, newInitialismNameLexem(original, match))
 	}
 
-	addNameLexem := func(original string) {
-		if s.postSplitInitialismCheck {
-			for _, initialism := range s.initialisms {
-				if upper(initialism) == upper(original) {
-					addInitialismNameLexem(original, initialism)
+	var addNameLexem func(string)
+	if s.postSplitInitialismCheck {
+		addNameLexem = func(original string) {
+			for i := range s.initialisms {
+				if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) {
+					addInitialismNameLexem(original, s.initialisms[i])
+					return
 				}
 			}
-		}
 
-		addCasualNameLexem(original)
+			addCasualNameLexem(original)
+		}
+	} else {
+		addNameLexem = addCasualNameLexem
 	}
 
-	for _, rn := range string(str) {
-		if replace, found := nameReplaceTable[rn]; found {
-			if currentSegment != "" {
-				addNameLexem(currentSegment)
-				currentSegment = ""
+	for _, rn := range str {
+		if replace, found := nameReplaceTable(rn); found {
+			if currentSegment.Len() > 0 {
+				addNameLexem(currentSegment.String())
+				currentSegment.Reset()
 			}
 
 			if replace != "" {
@@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem {
 		}
 
 		if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) {
-			if currentSegment != "" {
-				addNameLexem(currentSegment)
-				currentSegment = ""
+			if currentSegment.Len() > 0 {
+				addNameLexem(currentSegment.String())
+				currentSegment.Reset()
 			}
 
 			continue
 		}
 
 		if unicode.IsUpper(rn) {
-			if currentSegment != "" {
-				addNameLexem(currentSegment)
+			if currentSegment.Len() > 0 {
+				addNameLexem(currentSegment.String())
 			}
-			currentSegment = ""
+			currentSegment.Reset()
 		}
 
-		currentSegment += string(rn)
+		currentSegment.WriteRune(rn)
+	}
+
+	if currentSegment.Len() > 0 {
+		addNameLexem(currentSegment.String())
 	}
+}
+
+// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but
+// it ignores leading and trailing blank spaces in the compared
+// string.
+//
+// base is assumed to be composed of upper-cased runes, and be already
+// trimmed.
+//
+// This code is heavily inspired by strings.EqualFold.
+func isEqualFoldIgnoreSpace(base []rune, str string) bool {
+	var i, baseIndex int
+	// equivalent to b := []byte(str), but without data copy
+	b := hackStringBytes(str)
+
+	for i < len(b) {
+		if c := b[i]; c < utf8.RuneSelf {
+			// fast path for ASCII
+			if c != ' ' && c != '\t' {
+				break
+			}
+			i++
+
+			continue
+		}
+
+		// unicode case
+		r, size := utf8.DecodeRune(b[i:])
+		if !unicode.IsSpace(r) {
+			break
+		}
+		i += size
+	}
+
+	if i >= len(b) {
+		return len(base) == 0
+	}
+
+	for _, baseRune := range base {
+		if i >= len(b) {
+			break
+		}
+
+		if c := b[i]; c < utf8.RuneSelf {
+			// single byte rune case (ASCII)
+			if baseRune >= utf8.RuneSelf {
+				return false
+			}
+
+			baseChar := byte(baseRune)
+			if c != baseChar &&
+				!('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) {
+				return false
+			}
+
+			baseIndex++
+			i++
+
+			continue
+		}
+
+		// unicode case
+		r, size := utf8.DecodeRune(b[i:])
+		if unicode.ToUpper(r) != baseRune {
+			return false
+		}
+		baseIndex++
+		i += size
+	}
+
+	if baseIndex != len(base) {
+		return false
+	}
+
+	// all passed: now we should only have blanks
+	for i < len(b) {
+		if c := b[i]; c < utf8.RuneSelf {
+			// fast path for ASCII
+			if c != ' ' && c != '\t' {
+				return false
+			}
+			i++
+
+			continue
+		}
+
+		// unicode case
+		r, size := utf8.DecodeRune(b[i:])
+		if !unicode.IsSpace(r) {
+			return false
+		}
 
-	if currentSegment != "" {
-		addNameLexem(currentSegment)
+		i += size
 	}
 
-	return segments
+	return true
 }
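For reference, isEqualFoldIgnoreSpace above is intended to behave like a case-insensitive comparison against a trimmed string, without the allocations that trimming and conversion normally cost. A small model of those semantics using only the standard library (this is a reference model of the intended contract, not the vendored fast path itself):

package main

import (
	"fmt"
	"strings"
)

func main() {
	base := "HTTP" // assumed upper-cased and already trimmed
	for _, str := range []string{"http", " Http\t", "HTTPS", " ht tp "} {
		// reference semantics: case-insensitive equality after trimming
		// leading and trailing blanks; the vendored function walks the
		// bytes directly and skips the TrimSpace and string conversions
		equal := strings.EqualFold(base, strings.TrimSpace(str))
		fmt.Printf("%q -> %v\n", str, equal)
	}
	// prints: "http" -> true, " Http\t" -> true,
	//         "HTTPS" -> false, " ht tp " -> false
}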
diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go
new file mode 100644
index 000000000..c52d6bf71
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/string_bytes.go
@@ -0,0 +1,22 @@
+package swag
+
+import "unsafe"
+
+type internalString struct {
+	Data unsafe.Pointer
+	Len  int
+}
+
+// hackStringBytes returns the (unsafe) underlying bytes slice of a string.
+func hackStringBytes(str string) []byte {
+	p := (*internalString)(unsafe.Pointer(&str)).Data
+	return unsafe.Slice((*byte)(p), len(str))
+}
+
+/*
+ * go1.20 version (for when go mod moves to a go1.20 requirement):
+
+func hackStringBytes(str string) []byte {
+	return unsafe.Slice(unsafe.StringData(str), len(str))
+}
+*/
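hackStringBytes hands back a read-only []byte view of a string without copying, which is what lets isEqualFoldIgnoreSpace walk the UTF-8 bytes allocation-free. A standalone sketch using the go1.20+ form that the new file ships commented out; note the slice aliases the string's backing storage and must never be written to:

package main

import (
	"fmt"
	"unsafe"
)

// zero-copy view of a string's bytes; mutating the result is undefined
// behavior because Go strings are immutable (requires go1.20+)
func hackStringBytes(str string) []byte {
	return unsafe.Slice(unsafe.StringData(str), len(str))
}

func main() {
	b := hackStringBytes("read-only view")
	fmt.Println(len(b), string(b[:4])) // 14 read
}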
diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go
index 0413f7447..5051401c4 100644
--- a/vendor/github.com/go-openapi/swag/util.go
+++ b/vendor/github.com/go-openapi/swag/util.go
@@ -18,76 +18,25 @@ import (
 	"reflect"
 	"strings"
 	"unicode"
+	"unicode/utf8"
 )
 
-// commonInitialisms are common acronyms that are kept as whole uppercased words.
-var commonInitialisms *indexOfInitialisms
-
-// initialisms is a slice of sorted initialisms
-var initialisms []string
-
-var isInitialism func(string) bool
-
 // GoNamePrefixFunc sets an optional rule to prefix go names
 // which do not start with a letter.
 //
+// The prefix function is assumed to return a string that starts with an upper case letter.
+//
 // e.g. to help convert "123" into "{prefix}123"
 //
 // The default is to prefix with "X"
 var GoNamePrefixFunc func(string) string
 
-func init() {
-	// Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
-	var configuredInitialisms = map[string]bool{
-		"ACL":   true,
-		"API":   true,
-		"ASCII": true,
-		"CPU":   true,
-		"CSS":   true,
-		"DNS":   true,
-		"EOF":   true,
-		"GUID":  true,
-		"HTML":  true,
-		"HTTPS": true,
-		"HTTP":  true,
-		"ID":    true,
-		"IP":    true,
-		"IPv4":  true,
-		"IPv6":  true,
-		"JSON":  true,
-		"LHS":   true,
-		"OAI":   true,
-		"QPS":   true,
-		"RAM":   true,
-		"RHS":   true,
-		"RPC":   true,
-		"SLA":   true,
-		"SMTP":  true,
-		"SQL":   true,
-		"SSH":   true,
-		"TCP":   true,
-		"TLS":   true,
-		"TTL":   true,
-		"UDP":   true,
-		"UI":    true,
-		"UID":   true,
-		"UUID":  true,
-		"URI":   true,
-		"URL":   true,
-		"UTF8":  true,
-		"VM":    true,
-		"XML":   true,
-		"XMPP":  true,
-		"XSRF":  true,
-		"XSS":   true,
+func prefixFunc(name, in string) string {
+	if GoNamePrefixFunc == nil {
+		return "X" + in
 	}
 
-	// a thread-safe index of initialisms
-	commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
-	initialisms = commonInitialisms.sorted()
-
-	// a test function
-	isInitialism = commonInitialisms.isInitialism
+	return GoNamePrefixFunc(name) + in
 }
 
 const (
@@ -156,22 +105,6 @@ func SplitByFormat(data, format string) []string {
 	return result
 }
 
-type byInitialism []string
-
-func (s byInitialism) Len() int {
-	return len(s)
-}
-func (s byInitialism) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
-func (s byInitialism) Less(i, j int) bool {
-	if len(s[i]) != len(s[j]) {
-		return len(s[i]) < len(s[j])
-	}
-
-	return strings.Compare(s[i], s[j]) > 0
-}
-
 // Removes leading whitespaces
 func trim(str string) string {
 	return strings.TrimSpace(str)
 }
@@ -188,15 +121,20 @@ func lower(str string) string {
 }
 
 // Camelize an uppercased word
-func Camelize(word string) (camelized string) {
+func Camelize(word string) string {
+	camelized := poolOfBuffers.BorrowBuffer(len(word))
+	defer func() {
+		poolOfBuffers.RedeemBuffer(camelized)
+	}()
+
 	for pos, ru := range []rune(word) {
 		if pos > 0 {
-			camelized += string(unicode.ToLower(ru))
+			camelized.WriteRune(unicode.ToLower(ru))
 		} else {
-			camelized += string(unicode.ToUpper(ru))
+			camelized.WriteRune(unicode.ToUpper(ru))
 		}
 	}
-	return
+	return camelized.String()
 }
 
 // ToFileName lowercases and underscores a go type name
@@ -224,26 +162,31 @@ func ToCommandName(name string) string {
 
 // ToHumanNameLower represents a code name as a human series of words
 func ToHumanNameLower(name string) string {
-	in := newSplitter(withPostSplitInitialismCheck).split(name)
-	out := make([]string, 0, len(in))
+	s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+	in := s.split(name)
+	poolOfSplitters.RedeemSplitter(s)
+	out := make([]string, 0, len(*in))
 
-	for _, w := range in {
+	for _, w := range *in {
 		if !w.IsInitialism() {
 			out = append(out, lower(w.GetOriginal()))
 		} else {
 			out = append(out, trim(w.GetOriginal()))
 		}
 	}
+	poolOfLexems.RedeemLexems(in)
 
 	return strings.Join(out, " ")
 }
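The Camelize rewrite earlier in this file's diff swaps string concatenation for a pooled bytes.Buffer while preserving the observable behavior: first rune upper-cased, the rest lower-cased. A behavior-equivalent standalone version for reference, using a plain strings.Builder in place of the buffer pool:

package main

import (
	"fmt"
	"strings"
	"unicode"
)

// camelize upper-cases the first rune and lower-cases the rest,
// matching the semantics of the vendored Camelize
func camelize(word string) string {
	var b strings.Builder
	b.Grow(len(word))
	for pos, ru := range word {
		if pos == 0 {
			b.WriteRune(unicode.ToUpper(ru))
			continue
		}
		b.WriteRune(unicode.ToLower(ru))
	}

	return b.String()
}

func main() {
	fmt.Println(camelize("gRPC"), camelize("API")) // Grpc Api
}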
 
 // ToHumanNameTitle represents a code name as a human series of words with the first letters titleized
 func ToHumanNameTitle(name string) string {
-	in := newSplitter(withPostSplitInitialismCheck).split(name)
+	s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+	in := s.split(name)
+	poolOfSplitters.RedeemSplitter(s)
 
-	out := make([]string, 0, len(in))
-	for _, w := range in {
+	out := make([]string, 0, len(*in))
+	for _, w := range *in {
 		original := trim(w.GetOriginal())
 		if !w.IsInitialism() {
 			out = append(out, Camelize(original))
@@ -251,6 +194,8 @@ func ToHumanNameTitle(name string) string {
 			out = append(out, original)
 		}
 	}
+	poolOfLexems.RedeemLexems(in)
+
 	return strings.Join(out, " ")
 }
 
@@ -283,35 +228,70 @@ func ToVarName(name string) string {
 
 // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes
 func ToGoName(name string) string {
-	lexems := newSplitter(withPostSplitInitialismCheck).split(name)
+	s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+	lexems := s.split(name)
+	poolOfSplitters.RedeemSplitter(s)
+	defer func() {
+		poolOfLexems.RedeemLexems(lexems)
+	}()
+	lexemes := *lexems
+
+	if len(lexemes) == 0 {
+		return ""
+	}
+
+	result := poolOfBuffers.BorrowBuffer(len(name))
+	defer func() {
+		poolOfBuffers.RedeemBuffer(result)
+	}()
+
+	// check if not starting with a letter, upper case
+	firstPart := lexemes[0].GetUnsafeGoName()
+	if lexemes[0].IsInitialism() {
+		firstPart = upper(firstPart)
+	}
+
+	if c := firstPart[0]; c < utf8.RuneSelf {
+		// ASCII
+		switch {
+		case 'A' <= c && c <= 'Z':
+			result.WriteString(firstPart)
+		case 'a' <= c && c <= 'z':
+			result.WriteByte(c - 'a' + 'A')
+			result.WriteString(firstPart[1:])
+		default:
+			result.WriteString(prefixFunc(name, firstPart))
+			// NOTE: no longer check if prefixFunc returns a string that starts with uppercase:
+			// assume this is always the case
+		}
+	} else {
+		// unicode
+		firstRune, _ := utf8.DecodeRuneInString(firstPart)
+		switch {
+		case !unicode.IsLetter(firstRune):
+			result.WriteString(prefixFunc(name, firstPart))
+		case !unicode.IsUpper(firstRune):
+			result.WriteString(prefixFunc(name, firstPart))
+			/*
+				result.WriteRune(unicode.ToUpper(firstRune))
+				result.WriteString(firstPart[offset:])
+			*/
+		default:
+			result.WriteString(firstPart)
+		}
+	}
 
-	result := ""
-	for _, lexem := range lexems {
+	for _, lexem := range lexemes[1:] {
 		goName := lexem.GetUnsafeGoName()
 
 		// to support old behavior
 		if lexem.IsInitialism() {
 			goName = upper(goName)
 		}
-		result += goName
+		result.WriteString(goName)
 	}
 
-	if len(result) > 0 {
-		// Only prefix with X when the first character isn't an ascii letter
-		first := []rune(result)[0]
-		if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) {
-			if GoNamePrefixFunc == nil {
-				return "X" + result
-			}
-			result = GoNamePrefixFunc(name) + result
-		}
-		first = []rune(result)[0]
-		if unicode.IsLetter(first) && !unicode.IsUpper(first) {
-			result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...))
-		}
-	}
-
-	return result
+	return result.String()
 }
 
 // ContainsStrings searches a slice of strings for a case-sensitive match
@@ -376,16 +356,6 @@ func IsZero(data interface{}) bool {
 	}
 }
 
-// AddInitialisms add additional initialisms
-func AddInitialisms(words ...string) {
-	for _, word := range words {
-		// commonInitialisms[upper(word)] = true
-		commonInitialisms.add(upper(word))
-	}
-	// sort again
-	initialisms = commonInitialisms.sorted()
-}
-
 // CommandLineOptionsGroup represents a group of user-defined command line options
 type CommandLineOptionsGroup struct {
 	ShortDescription string
diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml
deleted file mode 100644
index 3baf5a456..000000000
--- 
a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml +++ /dev/null @@ -1,167 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/497 - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: clusteroperators.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ClusterOperator - listKind: ClusterOperatorList - plural: clusteroperators - shortNames: - - co - singular: clusteroperator - scope: Cluster - versions: - - additionalPrinterColumns: - - description: The version the operator is at. - jsonPath: .status.versions[?(@.name=="operator")].version - name: Version - type: string - - description: Whether the operator is running and stable. - jsonPath: .status.conditions[?(@.type=="Available")].status - name: Available - type: string - - description: Whether the operator is processing changes. - jsonPath: .status.conditions[?(@.type=="Progressing")].status - name: Progressing - type: string - - description: Whether the operator is degraded. - jsonPath: .status.conditions[?(@.type=="Degraded")].status - name: Degraded - type: string - - description: The time the operator's Available status last changed. - jsonPath: .status.conditions[?(@.type=="Available")].lastTransitionTime - name: Since - type: date - name: v1 - schema: - openAPIV3Schema: - description: "ClusterOperator is the Custom Resource object which holds the - current state of an operator. This object is used by operators to convey - their state to the rest of the cluster. \n Compatibility level 1: Stable - within a major release for a minimum of 12 months or 3 minor releases (whichever - is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds configuration that could apply to any operator. - type: object - status: - description: status holds the information about the state of an operator. It - is consistent with status information across the Kubernetes ecosystem. - properties: - conditions: - description: conditions describes the state of the operator's managed - and monitored components. - items: - description: ClusterOperatorStatusCondition represents the state - of the operator's managed and monitored components. - properties: - lastTransitionTime: - description: lastTransitionTime is the time of the last update - to the current status property. - format: date-time - type: string - message: - description: message provides additional information about the - current condition. This is only to be consumed by humans. It - may contain Line Feed characters (U+000A), which should be - rendered as new lines. 
- type: string - reason: - description: reason is the CamelCase reason for the condition's - current status. - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: type specifies the aspect reported by this condition. - type: string - required: - - lastTransitionTime - - status - - type - type: object - type: array - extension: - description: extension contains any additional status information - specific to the operator which owns this status object. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - relatedObjects: - description: 'relatedObjects is a list of objects that are "interesting" - or related to this operator. Common uses are: 1. the detailed resource - driving the operator 2. operator namespaces 3. operand namespaces' - items: - description: ObjectReference contains enough information to let - you inspect or modify the referred object. - properties: - group: - description: group of the referent. - type: string - name: - description: name of the referent. - type: string - namespace: - description: namespace of the referent. - type: string - resource: - description: resource of the referent. - type: string - required: - - group - - name - - resource - type: object - type: array - versions: - description: versions is a slice of operator and operand version tuples. Operators - which manage multiple operands will have multiple operand entries - in the array. Available operators must report the version of the - operator itself with the name "operator". An operator reports a - new "operator" version when it has rolled out the new version to - all of its operands. - items: - properties: - name: - description: name is the name of the particular operand this - version is for. It usually matches container images, not - operators. - type: string - version: - description: version indicates which version of a particular - operand is currently being managed. It must always match - the Available operand. 
If 1.0.0 is Available, then this must - indicate 1.0.0 even if the operator is trying to rollout 1.1.0 - type: string - required: - - name - - version - type: object - type: array - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-CustomNoUpgrade.crd.yaml deleted file mode 100644 index dcb871eee..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,780 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/495 - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: clusterversions.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ClusterVersion - plural: clusterversions - singular: clusterversion - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.history[?(@.state=="Completed")].version - name: Version - type: string - - jsonPath: .status.conditions[?(@.type=="Available")].status - name: Available - type: string - - jsonPath: .status.conditions[?(@.type=="Progressing")].status - name: Progressing - type: string - - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime - name: Since - type: date - - jsonPath: .status.conditions[?(@.type=="Progressing")].message - name: Status - type: string - name: v1 - schema: - openAPIV3Schema: - description: "ClusterVersion is the configuration for the ClusterVersionOperator. - This is where parameters related to automatic updates can be set. \n Compatibility - level 1: Stable within a major release for a minimum of 12 months or 3 minor - releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec is the desired state of the cluster version - the operator - will work to ensure that the desired version is applied to the cluster. - properties: - capabilities: - description: capabilities configures the installation of optional, - core cluster components. A null value here is identical to an empty - object; see the child properties for default semantics. - properties: - additionalEnabledCapabilities: - description: additionalEnabledCapabilities extends the set of - managed capabilities beyond the baseline defined in baselineCapabilitySet. The - default is an empty set. 
- items: - description: ClusterVersionCapability enumerates optional, core - cluster components. - enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - baselineCapabilitySet: - description: baselineCapabilitySet selects an initial set of optional - capabilities to enable, which can be extended via additionalEnabledCapabilities. If - unset, the cluster will choose a default, and the default may - change over time. The current default is vCurrent. - enum: - - None - - v4.11 - - v4.12 - - v4.13 - - v4.14 - - v4.15 - - v4.16 - - vCurrent - type: string - type: object - channel: - description: channel is an identifier for explicitly requesting that - a non-default set of updates be applied to this cluster. The default - channel will be contain stable updates that are appropriate for - production clusters. - type: string - clusterID: - description: clusterID uniquely identifies this cluster. This is expected - to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - in hexadecimal values). This is a required field. - type: string - desiredUpdate: - description: "desiredUpdate is an optional field that indicates the - desired value of the cluster version. Setting this value will trigger - an upgrade (if the current version does not match the desired version). - The set of recommended update values is listed as part of available - updates in status, and setting values outside that range may cause - the upgrade to fail. \n Some of the fields are inter-related with - restrictions and meanings described here. 1. image is specified, - version is specified, architecture is specified. API validation - error. 2. image is specified, version is specified, architecture - is not specified. You should not do this. version is silently ignored - and image is used. 3. image is specified, version is not specified, - architecture is specified. API validation error. 4. image is specified, - version is not specified, architecture is not specified. image is - used. 5. image is not specified, version is specified, architecture - is specified. version and desired architecture are used to select - an image. 6. image is not specified, version is specified, architecture - is not specified. version and current architecture are used to select - an image. 7. image is not specified, version is not specified, architecture - is specified. API validation error. 8. image is not specified, version - is not specified, architecture is not specified. API validation - error. \n If an upgrade fails the operator will halt and report - status about the failing component. Setting the desired update value - back to the previous version will cause a rollback to be attempted. - Not all rollbacks will succeed." - properties: - architecture: - description: architecture is an optional field that indicates - the desired value of the cluster architecture. In this context - cluster architecture means either a single architecture or a - multi architecture. architecture can only be set to Multi thereby - only allowing updates from single to multi architecture. If - architecture is set, image cannot be set and version must be - set. Valid values are 'Multi' and empty. 
- enum: - - Multi - - "" - type: string - force: - description: force allows an administrator to update to an image - that has failed verification or upgradeable checks. This option - should only be used when the authenticity of the provided image - has been verified out of band because the provided image will - run with full administrative access to the cluster. Do not use - this flag with images that comes from unknown or potentially - malicious sources. - type: boolean - image: - description: image is a container image location that contains - the update. image should be used when the desired version does - not exist in availableUpdates or history. When image is set, - version is ignored. When image is set, version should be empty. - When image is set, architecture cannot be specified. - type: string - version: - description: version is a semantic version identifying the update - version. version is ignored if image is specified and required - if architecture is specified. - type: string - type: object - x-kubernetes-validations: - - message: cannot set both Architecture and Image - rule: 'has(self.architecture) && has(self.image) ? (self.architecture - == '''' || self.image == '''') : true' - - message: Version must be set if Architecture is set - rule: 'has(self.architecture) && self.architecture != '''' ? self.version - != '''' : true' - overrides: - description: overrides is list of overides for components that are - managed by cluster version operator. Marking a component unmanaged - will prevent the operator from creating or updating the object. - items: - description: ComponentOverride allows overriding cluster version - operator's behavior for a component. - properties: - group: - description: group identifies the API group that the kind is - in. - type: string - kind: - description: kind indentifies which object to override. - type: string - name: - description: name is the component's name. - type: string - namespace: - description: namespace is the component's namespace. If the - resource is cluster scoped, the namespace should be empty. - type: string - unmanaged: - description: 'unmanaged controls if cluster version operator - should stop managing the resources in this cluster. Default: - false' - type: boolean - required: - - group - - kind - - name - - namespace - - unmanaged - type: object - type: array - x-kubernetes-list-map-keys: - - kind - - group - - namespace - - name - x-kubernetes-list-type: map - signatureStores: - description: "signatureStores contains the upstream URIs to verify - release signatures and optional reference to a config map by name - containing the PEM-encoded CA bundle. \n By default, CVO will use - existing signature stores if this property is empty. The CVO will - check the release signatures in the local ConfigMaps first. It will - search for a valid signature in these stores in parallel only when - local ConfigMaps did not include a valid signature. Validation will - fail if none of the signature stores reply with valid signature - before timeout. Setting signatureStores will replace the default - signature stores with custom signature stores. Default stores can - be used with custom signature stores by adding them manually. \n - A maximum of 32 signature stores may be configured." - items: - description: SignatureStore represents the URL of custom Signature - Store - properties: - ca: - description: ca is an optional reference to a config map by - name containing the PEM-encoded CA bundle. 
It is used as a - trust anchor to validate the TLS certificate presented by - the remote server. The key "ca.crt" is used to locate the - data. If specified and the config map or expected key is not - found, the signature store is not honored. If the specified - ca data is not valid, the signature store is not honored. - If empty, we fall back to the CA configured via Proxy, which - is appended to the default system roots. The namespace for - this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - url: - description: url contains the upstream custom signature store - URL. url should be a valid absolute http/https URI of an upstream - signature store as per rfc1738. This must be provided and - cannot be empty. - type: string - x-kubernetes-validations: - - message: url must be a valid absolute URL - rule: isURL(self) - required: - - url - type: object - maxItems: 32 - type: array - x-kubernetes-list-map-keys: - - url - x-kubernetes-list-type: map - upstream: - description: upstream may be used to specify the preferred update - server. By default it will use the appropriate update server for - the cluster and region. - type: string - required: - - clusterID - type: object - status: - description: status contains information about the available updates and - any in-progress updates. - properties: - availableUpdates: - description: availableUpdates contains updates recommended for this - cluster. Updates which appear in conditionalUpdates but not in availableUpdates - may expose this cluster to known issues. This list may be empty - if no updates are recommended, if the update service is unavailable, - or if an invalid channel has been specified. - items: - description: Release represents an OpenShift release image and associated - metadata. - properties: - channels: - description: channels is the set of Cincinnati channels to which - the release currently belongs. - items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is optional - if version is specified and the availableUpdates field contains - a matching version. - type: string - url: - description: url contains information about this release. This - URL is set by the 'url' metadata property on a release or - the metadata returned by the update API and should be displayed - as a link in user interfaces. The URL field may not be set - for test or nightly releases. - type: string - version: - description: version is a semantic version identifying the update - version. When this field is part of spec, version is optional - if image is specified. - type: string - type: object - nullable: true - type: array - x-kubernetes-list-type: atomic - capabilities: - description: capabilities describes the state of optional, core cluster - components. - properties: - enabledCapabilities: - description: enabledCapabilities lists all the capabilities that - are currently managed. - items: - description: ClusterVersionCapability enumerates optional, core - cluster components. 
- enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - knownCapabilities: - description: knownCapabilities lists all the capabilities known - to the current cluster. - items: - description: ClusterVersionCapability enumerates optional, core - cluster components. - enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - type: object - conditionalUpdates: - description: conditionalUpdates contains the list of updates that - may be recommended for this cluster if it meets specific required - conditions. Consumers interested in the set of updates that are - actually recommended for this cluster should use availableUpdates. - This list may be empty if no updates are recommended, if the update - service is unavailable, or if an empty or invalid channel has been - specified. - items: - description: ConditionalUpdate represents an update which is recommended - to some clusters on the version the current cluster is reconciling, - but which may not be recommended for the current cluster. - properties: - conditions: - description: 'conditions represents the observations of the - conditional update''s current status. Known types are: * Recommended, - for whether the update is recommended for the current cluster.' - items: - description: "Condition contains details for one aspect of - the current state of this API Resource. --- This struct - is intended for direct use as an array at the field path - .status.conditions. For example, \n type FooStatus struct{ - // Represents the observations of a foo's current state. - // Known .status.conditions.type are: \"Available\", \"Progressing\", - and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields - }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should - be when the underlying condition changed. If that is - not known, then using the time when the API field changed - is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, - if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the - current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier - indicating the reason for the condition's last transition. 
- Producers of specific condition types may define expected - values and meanings for this field, and whether the - values are considered a guaranteed API. The value should - be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across - resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability - to deconflict is important. The regex it matches is - (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - release: - description: release is the target of the update. - properties: - channels: - description: channels is the set of Cincinnati channels - to which the release currently belongs. - items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is - optional if version is specified and the availableUpdates - field contains a matching version. - type: string - url: - description: url contains information about this release. - This URL is set by the 'url' metadata property on a release - or the metadata returned by the update API and should - be displayed as a link in user interfaces. The URL field - may not be set for test or nightly releases. - type: string - version: - description: version is a semantic version identifying the - update version. When this field is part of spec, version - is optional if image is specified. - type: string - type: object - risks: - description: risks represents the range of issues associated - with updating to the target release. The cluster-version operator - will evaluate all entries, and only recommend the update if - there is at least one entry and all entries recommend the - update. - items: - description: ConditionalUpdateRisk represents a reason and - cluster-state for not recommending a conditional update. - properties: - matchingRules: - description: matchingRules is a slice of conditions for - deciding which clusters match the risk and which do - not. The slice is ordered by decreasing precedence. - The cluster-version operator will walk the slice in - order, and stop after the first it can successfully - evaluate. If no condition can be successfully evaluated, - the update will not be recommended. - items: - description: ClusterCondition is a union of typed cluster - conditions. The 'type' property determines which - of the type-specific properties are relevant. When - evaluated on a cluster, the condition may match, not - match, or fail to evaluate. - properties: - promql: - description: promQL represents a cluster condition - based on PromQL. - properties: - promql: - description: PromQL is a PromQL query classifying - clusters. This query query should return a - 1 in the match case and a 0 in the does-not-match - case. 
Queries which return no time series, - or which return values besides 0 or 1, are - evaluation failures. - type: string - required: - - promql - type: object - type: - description: type represents the cluster-condition - type. This defines the members and semantics of - any additional properties. - enum: - - Always - - PromQL - type: string - required: - - type - type: object - minItems: 1 - type: array - x-kubernetes-list-type: atomic - message: - description: message provides additional information about - the risk of updating, in the event that matchingRules - match the cluster state. This is only to be consumed - by humans. It may contain Line Feed characters (U+000A), - which should be rendered as new lines. - minLength: 1 - type: string - name: - description: name is the CamelCase reason for not recommending - a conditional update, in the event that matchingRules - match the cluster state. - minLength: 1 - type: string - url: - description: url contains information about this risk. - format: uri - minLength: 1 - type: string - required: - - matchingRules - - message - - name - - url - type: object - minItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - required: - - release - - risks - type: object - type: array - x-kubernetes-list-type: atomic - conditions: - description: conditions provides information about the cluster version. - The condition "Available" is set to true if the desiredUpdate has - been reached. The condition "Progressing" is set to true if an update - is being applied. The condition "Degraded" is set to true if an - update is currently blocked by a temporary or permanent error. Conditions - are only valid for the current desiredUpdate when metadata.generation - is equal to status.generation. - items: - description: ClusterOperatorStatusCondition represents the state - of the operator's managed and monitored components. - properties: - lastTransitionTime: - description: lastTransitionTime is the time of the last update - to the current status property. - format: date-time - type: string - message: - description: message provides additional information about the - current condition. This is only to be consumed by humans. It - may contain Line Feed characters (U+000A), which should be - rendered as new lines. - type: string - reason: - description: reason is the CamelCase reason for the condition's - current status. - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: type specifies the aspect reported by this condition. - type: string - required: - - lastTransitionTime - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - desired: - description: desired is the version that the cluster is reconciling - towards. If the cluster is not yet fully initialized desired will - be set with the information available, which may be an image or - a tag. - properties: - channels: - description: channels is the set of Cincinnati channels to which - the release currently belongs. - items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is optional - if version is specified and the availableUpdates field contains - a matching version. - type: string - url: - description: url contains information about this release. 
This - URL is set by the 'url' metadata property on a release or the - metadata returned by the update API and should be displayed - as a link in user interfaces. The URL field may not be set for - test or nightly releases. - type: string - version: - description: version is a semantic version identifying the update - version. When this field is part of spec, version is optional - if image is specified. - type: string - type: object - history: - description: history contains a list of the most recent versions applied - to the cluster. This value may be empty during cluster startup, - and then will be updated when a new update is being applied. The - newest update is first in the list and it is ordered by recency. - Updates in the history have state Completed if the rollout completed - - if an update was failing or halfway applied the state will be - Partial. Only a limited amount of update history is preserved. - items: - description: UpdateHistory is a single attempted update to the cluster. - properties: - acceptedRisks: - description: acceptedRisks records risks which were accepted - to initiate the update. For example, it may menition an Upgradeable=False - or missing signature that was overriden via desiredUpdate.force, - or an update that was initiated despite not being in the availableUpdates - set of recommended update targets. - type: string - completionTime: - description: completionTime, if set, is when the update was - fully applied. The update that is currently being applied - will have a null completion time. Completion time will always - be set for entries that are not the current update (usually - to the started time of the next update). - format: date-time - nullable: true - type: string - image: - description: image is a container image location that contains - the update. This value is always populated. - type: string - startedTime: - description: startedTime is the time at which the update was - started. - format: date-time - type: string - state: - description: state reflects whether the update was fully applied. - The Partial state indicates the update is not fully applied, - while the Completed state indicates the update was successfully - rolled out at least once (all parts of the update successfully - applied). - type: string - verified: - description: verified indicates whether the provided update - was properly verified before it was installed. If this is - false the cluster may not be trusted. Verified does not cover - upgradeable checks that depend on the cluster state at the - time when the update target was accepted. - type: boolean - version: - description: version is a semantic version identifying the update - version. If the requested image does not define a version, - or if a failure occurs retrieving the image, this value may - be empty. - type: string - required: - - completionTime - - image - - startedTime - - state - - verified - type: object - type: array - x-kubernetes-list-type: atomic - observedGeneration: - description: observedGeneration reports which version of the spec - is being synced. If this value is not equal to metadata.generation, - then the desired and conditions fields may represent a previous - version. - format: int64 - type: integer - versionHash: - description: versionHash is a fingerprint of the content that the - cluster will be updated with. It is used by the operator to avoid - unnecessary work and is for internal use only. 
- type: string - required: - - availableUpdates - - desired - - observedGeneration - - versionHash - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: the `baremetal` capability requires the `MachineAPI` capability, - which is neither explicitly or implicitly enabled in this cluster, please - enable the `MachineAPI` capability - rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) - && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''baremetal'' - in self.spec.capabilities.additionalEnabledCapabilities ? ''MachineAPI'' - in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) - && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) - && ''MachineAPI'' in self.status.capabilities.enabledCapabilities) : true' - - message: the `marketplace` capability requires the `OperatorLifecycleManager` - capability, which is neither explicitly or implicitly enabled in this - cluster, please enable the `OperatorLifecycleManager` capability - rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) - && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''marketplace'' - in self.spec.capabilities.additionalEnabledCapabilities ? ''OperatorLifecycleManager'' - in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) - && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) - && ''OperatorLifecycleManager'' in self.status.capabilities.enabledCapabilities) - : true' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-Default.crd.yaml deleted file mode 100644 index 371177aaf..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-Default.crd.yaml +++ /dev/null @@ -1,727 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/495 - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: Default - name: clusterversions.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ClusterVersion - plural: clusterversions - singular: clusterversion - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.history[?(@.state=="Completed")].version - name: Version - type: string - - jsonPath: .status.conditions[?(@.type=="Available")].status - name: Available - type: string - - jsonPath: .status.conditions[?(@.type=="Progressing")].status - name: Progressing - type: string - - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime - name: Since - type: date - - jsonPath: .status.conditions[?(@.type=="Progressing")].message - name: Status - type: string - name: v1 - schema: - openAPIV3Schema: - description: "ClusterVersion is the configuration for the ClusterVersionOperator. - This is where parameters related to automatic updates can be set. \n Compatibility - level 1: Stable within a major release for a minimum of 12 months or 3 minor - releases (whichever is longer)." 
- properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec is the desired state of the cluster version - the operator - will work to ensure that the desired version is applied to the cluster. - properties: - capabilities: - description: capabilities configures the installation of optional, - core cluster components. A null value here is identical to an empty - object; see the child properties for default semantics. - properties: - additionalEnabledCapabilities: - description: additionalEnabledCapabilities extends the set of - managed capabilities beyond the baseline defined in baselineCapabilitySet. The - default is an empty set. - items: - description: ClusterVersionCapability enumerates optional, core - cluster components. - enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - baselineCapabilitySet: - description: baselineCapabilitySet selects an initial set of optional - capabilities to enable, which can be extended via additionalEnabledCapabilities. If - unset, the cluster will choose a default, and the default may - change over time. The current default is vCurrent. - enum: - - None - - v4.11 - - v4.12 - - v4.13 - - v4.14 - - v4.15 - - v4.16 - - vCurrent - type: string - type: object - channel: - description: channel is an identifier for explicitly requesting that - a non-default set of updates be applied to this cluster. The default - channel will be contain stable updates that are appropriate for - production clusters. - type: string - clusterID: - description: clusterID uniquely identifies this cluster. This is expected - to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - in hexadecimal values). This is a required field. - type: string - desiredUpdate: - description: "desiredUpdate is an optional field that indicates the - desired value of the cluster version. Setting this value will trigger - an upgrade (if the current version does not match the desired version). - The set of recommended update values is listed as part of available - updates in status, and setting values outside that range may cause - the upgrade to fail. \n Some of the fields are inter-related with - restrictions and meanings described here. 1. image is specified, - version is specified, architecture is specified. API validation - error. 2. image is specified, version is specified, architecture - is not specified. You should not do this. version is silently ignored - and image is used. 3. image is specified, version is not specified, - architecture is specified. API validation error. 4. 
image is specified, - version is not specified, architecture is not specified. image is - used. 5. image is not specified, version is specified, architecture - is specified. version and desired architecture are used to select - an image. 6. image is not specified, version is specified, architecture - is not specified. version and current architecture are used to select - an image. 7. image is not specified, version is not specified, architecture - is specified. API validation error. 8. image is not specified, version - is not specified, architecture is not specified. API validation - error. \n If an upgrade fails the operator will halt and report - status about the failing component. Setting the desired update value - back to the previous version will cause a rollback to be attempted. - Not all rollbacks will succeed." - properties: - architecture: - description: architecture is an optional field that indicates - the desired value of the cluster architecture. In this context - cluster architecture means either a single architecture or a - multi architecture. architecture can only be set to Multi thereby - only allowing updates from single to multi architecture. If - architecture is set, image cannot be set and version must be - set. Valid values are 'Multi' and empty. - enum: - - Multi - - "" - type: string - force: - description: force allows an administrator to update to an image - that has failed verification or upgradeable checks. This option - should only be used when the authenticity of the provided image - has been verified out of band because the provided image will - run with full administrative access to the cluster. Do not use - this flag with images that comes from unknown or potentially - malicious sources. - type: boolean - image: - description: image is a container image location that contains - the update. image should be used when the desired version does - not exist in availableUpdates or history. When image is set, - version is ignored. When image is set, version should be empty. - When image is set, architecture cannot be specified. - type: string - version: - description: version is a semantic version identifying the update - version. version is ignored if image is specified and required - if architecture is specified. - type: string - type: object - x-kubernetes-validations: - - message: cannot set both Architecture and Image - rule: 'has(self.architecture) && has(self.image) ? (self.architecture - == '''' || self.image == '''') : true' - - message: Version must be set if Architecture is set - rule: 'has(self.architecture) && self.architecture != '''' ? self.version - != '''' : true' - overrides: - description: overrides is list of overides for components that are - managed by cluster version operator. Marking a component unmanaged - will prevent the operator from creating or updating the object. - items: - description: ComponentOverride allows overriding cluster version - operator's behavior for a component. - properties: - group: - description: group identifies the API group that the kind is - in. - type: string - kind: - description: kind indentifies which object to override. - type: string - name: - description: name is the component's name. - type: string - namespace: - description: namespace is the component's namespace. If the - resource is cluster scoped, the namespace should be empty. - type: string - unmanaged: - description: 'unmanaged controls if cluster version operator - should stop managing the resources in this cluster. 
Default: - false' - type: boolean - required: - - group - - kind - - name - - namespace - - unmanaged - type: object - type: array - x-kubernetes-list-map-keys: - - kind - - group - - namespace - - name - x-kubernetes-list-type: map - upstream: - description: upstream may be used to specify the preferred update - server. By default it will use the appropriate update server for - the cluster and region. - type: string - required: - - clusterID - type: object - status: - description: status contains information about the available updates and - any in-progress updates. - properties: - availableUpdates: - description: availableUpdates contains updates recommended for this - cluster. Updates which appear in conditionalUpdates but not in availableUpdates - may expose this cluster to known issues. This list may be empty - if no updates are recommended, if the update service is unavailable, - or if an invalid channel has been specified. - items: - description: Release represents an OpenShift release image and associated - metadata. - properties: - channels: - description: channels is the set of Cincinnati channels to which - the release currently belongs. - items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is optional - if version is specified and the availableUpdates field contains - a matching version. - type: string - url: - description: url contains information about this release. This - URL is set by the 'url' metadata property on a release or - the metadata returned by the update API and should be displayed - as a link in user interfaces. The URL field may not be set - for test or nightly releases. - type: string - version: - description: version is a semantic version identifying the update - version. When this field is part of spec, version is optional - if image is specified. - type: string - type: object - nullable: true - type: array - x-kubernetes-list-type: atomic - capabilities: - description: capabilities describes the state of optional, core cluster - components. - properties: - enabledCapabilities: - description: enabledCapabilities lists all the capabilities that - are currently managed. - items: - description: ClusterVersionCapability enumerates optional, core - cluster components. - enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - knownCapabilities: - description: knownCapabilities lists all the capabilities known - to the current cluster. - items: - description: ClusterVersionCapability enumerates optional, core - cluster components. - enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - type: object - conditionalUpdates: - description: conditionalUpdates contains the list of updates that - may be recommended for this cluster if it meets specific required - conditions. 
Consumers interested in the set of updates that are - actually recommended for this cluster should use availableUpdates. - This list may be empty if no updates are recommended, if the update - service is unavailable, or if an empty or invalid channel has been - specified. - items: - description: ConditionalUpdate represents an update which is recommended - to some clusters on the version the current cluster is reconciling, - but which may not be recommended for the current cluster. - properties: - conditions: - description: 'conditions represents the observations of the - conditional update''s current status. Known types are: * Recommended, - for whether the update is recommended for the current cluster.' - items: - description: "Condition contains details for one aspect of - the current state of this API Resource. --- This struct - is intended for direct use as an array at the field path - .status.conditions. For example, \n type FooStatus struct{ - // Represents the observations of a foo's current state. - // Known .status.conditions.type are: \"Available\", \"Progressing\", - and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields - }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should - be when the underlying condition changed. If that is - not known, then using the time when the API field changed - is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, - if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the - current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier - indicating the reason for the condition's last transition. - Producers of specific condition types may define expected - values and meanings for this field, and whether the - values are considered a guaranteed API. The value should - be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across - resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability - to deconflict is important. 
The regex it matches is - (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - release: - description: release is the target of the update. - properties: - channels: - description: channels is the set of Cincinnati channels - to which the release currently belongs. - items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is - optional if version is specified and the availableUpdates - field contains a matching version. - type: string - url: - description: url contains information about this release. - This URL is set by the 'url' metadata property on a release - or the metadata returned by the update API and should - be displayed as a link in user interfaces. The URL field - may not be set for test or nightly releases. - type: string - version: - description: version is a semantic version identifying the - update version. When this field is part of spec, version - is optional if image is specified. - type: string - type: object - risks: - description: risks represents the range of issues associated - with updating to the target release. The cluster-version operator - will evaluate all entries, and only recommend the update if - there is at least one entry and all entries recommend the - update. - items: - description: ConditionalUpdateRisk represents a reason and - cluster-state for not recommending a conditional update. - properties: - matchingRules: - description: matchingRules is a slice of conditions for - deciding which clusters match the risk and which do - not. The slice is ordered by decreasing precedence. - The cluster-version operator will walk the slice in - order, and stop after the first it can successfully - evaluate. If no condition can be successfully evaluated, - the update will not be recommended. - items: - description: ClusterCondition is a union of typed cluster - conditions. The 'type' property determines which - of the type-specific properties are relevant. When - evaluated on a cluster, the condition may match, not - match, or fail to evaluate. - properties: - promql: - description: promQL represents a cluster condition - based on PromQL. - properties: - promql: - description: PromQL is a PromQL query classifying - clusters. This query query should return a - 1 in the match case and a 0 in the does-not-match - case. Queries which return no time series, - or which return values besides 0 or 1, are - evaluation failures. - type: string - required: - - promql - type: object - type: - description: type represents the cluster-condition - type. This defines the members and semantics of - any additional properties. - enum: - - Always - - PromQL - type: string - required: - - type - type: object - minItems: 1 - type: array - x-kubernetes-list-type: atomic - message: - description: message provides additional information about - the risk of updating, in the event that matchingRules - match the cluster state. This is only to be consumed - by humans. It may contain Line Feed characters (U+000A), - which should be rendered as new lines. 
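
The conditionalUpdates/risks schema above is populated by the cluster-version operator from update-service data rather than by users; purely as a shape reference (all values hypothetical), one entry could look like:

    conditionalUpdates:
    - release:
        version: 4.14.3
        image: quay.io/openshift-release-dev/ocp-release@sha256:0000000000000000000000000000000000000000000000000000000000000000  # placeholder digest
      risks:
      - name: ExampleRisk                               # CamelCase reason, hypothetical
        message: Clusters matching this rule may hit a known issue.
        url: https://access.redhat.com/solutions/0000000  # hypothetical advisory link
        matchingRules:
        - type: PromQL
          promql:
            promql: vector(0)                           # trivial "does not match"; real risks query cluster state
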
- minLength: 1 - type: string - name: - description: name is the CamelCase reason for not recommending - a conditional update, in the event that matchingRules - match the cluster state. - minLength: 1 - type: string - url: - description: url contains information about this risk. - format: uri - minLength: 1 - type: string - required: - - matchingRules - - message - - name - - url - type: object - minItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - required: - - release - - risks - type: object - type: array - x-kubernetes-list-type: atomic - conditions: - description: conditions provides information about the cluster version. - The condition "Available" is set to true if the desiredUpdate has - been reached. The condition "Progressing" is set to true if an update - is being applied. The condition "Degraded" is set to true if an - update is currently blocked by a temporary or permanent error. Conditions - are only valid for the current desiredUpdate when metadata.generation - is equal to status.generation. - items: - description: ClusterOperatorStatusCondition represents the state - of the operator's managed and monitored components. - properties: - lastTransitionTime: - description: lastTransitionTime is the time of the last update - to the current status property. - format: date-time - type: string - message: - description: message provides additional information about the - current condition. This is only to be consumed by humans. It - may contain Line Feed characters (U+000A), which should be - rendered as new lines. - type: string - reason: - description: reason is the CamelCase reason for the condition's - current status. - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: type specifies the aspect reported by this condition. - type: string - required: - - lastTransitionTime - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - desired: - description: desired is the version that the cluster is reconciling - towards. If the cluster is not yet fully initialized desired will - be set with the information available, which may be an image or - a tag. - properties: - channels: - description: channels is the set of Cincinnati channels to which - the release currently belongs. - items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is optional - if version is specified and the availableUpdates field contains - a matching version. - type: string - url: - description: url contains information about this release. This - URL is set by the 'url' metadata property on a release or the - metadata returned by the update API and should be displayed - as a link in user interfaces. The URL field may not be set for - test or nightly releases. - type: string - version: - description: version is a semantic version identifying the update - version. When this field is part of spec, version is optional - if image is specified. - type: string - type: object - history: - description: history contains a list of the most recent versions applied - to the cluster. This value may be empty during cluster startup, - and then will be updated when a new update is being applied. The - newest update is first in the list and it is ordered by recency. 
- Updates in the history have state Completed if the rollout completed - - if an update was failing or halfway applied the state will be - Partial. Only a limited amount of update history is preserved. - items: - description: UpdateHistory is a single attempted update to the cluster. - properties: - acceptedRisks: - description: acceptedRisks records risks which were accepted - to initiate the update. For example, it may menition an Upgradeable=False - or missing signature that was overriden via desiredUpdate.force, - or an update that was initiated despite not being in the availableUpdates - set of recommended update targets. - type: string - completionTime: - description: completionTime, if set, is when the update was - fully applied. The update that is currently being applied - will have a null completion time. Completion time will always - be set for entries that are not the current update (usually - to the started time of the next update). - format: date-time - nullable: true - type: string - image: - description: image is a container image location that contains - the update. This value is always populated. - type: string - startedTime: - description: startedTime is the time at which the update was - started. - format: date-time - type: string - state: - description: state reflects whether the update was fully applied. - The Partial state indicates the update is not fully applied, - while the Completed state indicates the update was successfully - rolled out at least once (all parts of the update successfully - applied). - type: string - verified: - description: verified indicates whether the provided update - was properly verified before it was installed. If this is - false the cluster may not be trusted. Verified does not cover - upgradeable checks that depend on the cluster state at the - time when the update target was accepted. - type: boolean - version: - description: version is a semantic version identifying the update - version. If the requested image does not define a version, - or if a failure occurs retrieving the image, this value may - be empty. - type: string - required: - - completionTime - - image - - startedTime - - state - - verified - type: object - type: array - x-kubernetes-list-type: atomic - observedGeneration: - description: observedGeneration reports which version of the spec - is being synced. If this value is not equal to metadata.generation, - then the desired and conditions fields may represent a previous - version. - format: int64 - type: integer - versionHash: - description: versionHash is a fingerprint of the content that the - cluster will be updated with. It is used by the operator to avoid - unnecessary work and is for internal use only. - type: string - required: - - availableUpdates - - desired - - observedGeneration - - versionHash - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: the `baremetal` capability requires the `MachineAPI` capability, - which is neither explicitly or implicitly enabled in this cluster, please - enable the `MachineAPI` capability - rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) - && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''baremetal'' - in self.spec.capabilities.additionalEnabledCapabilities ? 
''MachineAPI'' - in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) - && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) - && ''MachineAPI'' in self.status.capabilities.enabledCapabilities) : true' - - message: the `marketplace` capability requires the `OperatorLifecycleManager` - capability, which is neither explicitly or implicitly enabled in this - cluster, please enable the `OperatorLifecycleManager` capability - rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) - && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''marketplace'' - in self.spec.capabilities.additionalEnabledCapabilities ? ''OperatorLifecycleManager'' - in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) - && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) - && ''OperatorLifecycleManager'' in self.status.capabilities.enabledCapabilities) - : true' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index fcaf456a2..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,780 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/495 - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: clusterversions.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ClusterVersion - plural: clusterversions - singular: clusterversion - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.history[?(@.state=="Completed")].version - name: Version - type: string - - jsonPath: .status.conditions[?(@.type=="Available")].status - name: Available - type: string - - jsonPath: .status.conditions[?(@.type=="Progressing")].status - name: Progressing - type: string - - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime - name: Since - type: date - - jsonPath: .status.conditions[?(@.type=="Progressing")].message - name: Status - type: string - name: v1 - schema: - openAPIV3Schema: - description: "ClusterVersion is the configuration for the ClusterVersionOperator. - This is where parameters related to automatic updates can be set. \n Compatibility - level 1: Stable within a major release for a minimum of 12 months or 3 minor - releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec is the desired state of the cluster version - the operator - will work to ensure that the desired version is applied to the cluster. - properties: - capabilities: - description: capabilities configures the installation of optional, - core cluster components. A null value here is identical to an empty - object; see the child properties for default semantics. - properties: - additionalEnabledCapabilities: - description: additionalEnabledCapabilities extends the set of - managed capabilities beyond the baseline defined in baselineCapabilitySet. The - default is an empty set. - items: - description: ClusterVersionCapability enumerates optional, core - cluster components. - enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - baselineCapabilitySet: - description: baselineCapabilitySet selects an initial set of optional - capabilities to enable, which can be extended via additionalEnabledCapabilities. If - unset, the cluster will choose a default, and the default may - change over time. The current default is vCurrent. - enum: - - None - - v4.11 - - v4.12 - - v4.13 - - v4.14 - - v4.15 - - v4.16 - - vCurrent - type: string - type: object - channel: - description: channel is an identifier for explicitly requesting that - a non-default set of updates be applied to this cluster. The default - channel will be contain stable updates that are appropriate for - production clusters. - type: string - clusterID: - description: clusterID uniquely identifies this cluster. This is expected - to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - in hexadecimal values). This is a required field. - type: string - desiredUpdate: - description: "desiredUpdate is an optional field that indicates the - desired value of the cluster version. Setting this value will trigger - an upgrade (if the current version does not match the desired version). - The set of recommended update values is listed as part of available - updates in status, and setting values outside that range may cause - the upgrade to fail. \n Some of the fields are inter-related with - restrictions and meanings described here. 1. image is specified, - version is specified, architecture is specified. API validation - error. 2. image is specified, version is specified, architecture - is not specified. You should not do this. version is silently ignored - and image is used. 3. image is specified, version is not specified, - architecture is specified. API validation error. 4. image is specified, - version is not specified, architecture is not specified. image is - used. 5. image is not specified, version is specified, architecture - is specified. version and desired architecture are used to select - an image. 6. image is not specified, version is specified, architecture - is not specified. version and current architecture are used to select - an image. 7. image is not specified, version is not specified, architecture - is specified. API validation error. 8. image is not specified, version - is not specified, architecture is not specified. API validation - error. 
\n If an upgrade fails the operator will halt and report - status about the failing component. Setting the desired update value - back to the previous version will cause a rollback to be attempted. - Not all rollbacks will succeed." - properties: - architecture: - description: architecture is an optional field that indicates - the desired value of the cluster architecture. In this context - cluster architecture means either a single architecture or a - multi architecture. architecture can only be set to Multi thereby - only allowing updates from single to multi architecture. If - architecture is set, image cannot be set and version must be - set. Valid values are 'Multi' and empty. - enum: - - Multi - - "" - type: string - force: - description: force allows an administrator to update to an image - that has failed verification or upgradeable checks. This option - should only be used when the authenticity of the provided image - has been verified out of band because the provided image will - run with full administrative access to the cluster. Do not use - this flag with images that comes from unknown or potentially - malicious sources. - type: boolean - image: - description: image is a container image location that contains - the update. image should be used when the desired version does - not exist in availableUpdates or history. When image is set, - version is ignored. When image is set, version should be empty. - When image is set, architecture cannot be specified. - type: string - version: - description: version is a semantic version identifying the update - version. version is ignored if image is specified and required - if architecture is specified. - type: string - type: object - x-kubernetes-validations: - - message: cannot set both Architecture and Image - rule: 'has(self.architecture) && has(self.image) ? (self.architecture - == '''' || self.image == '''') : true' - - message: Version must be set if Architecture is set - rule: 'has(self.architecture) && self.architecture != '''' ? self.version - != '''' : true' - overrides: - description: overrides is list of overides for components that are - managed by cluster version operator. Marking a component unmanaged - will prevent the operator from creating or updating the object. - items: - description: ComponentOverride allows overriding cluster version - operator's behavior for a component. - properties: - group: - description: group identifies the API group that the kind is - in. - type: string - kind: - description: kind indentifies which object to override. - type: string - name: - description: name is the component's name. - type: string - namespace: - description: namespace is the component's namespace. If the - resource is cluster scoped, the namespace should be empty. - type: string - unmanaged: - description: 'unmanaged controls if cluster version operator - should stop managing the resources in this cluster. Default: - false' - type: boolean - required: - - group - - kind - - name - - namespace - - unmanaged - type: object - type: array - x-kubernetes-list-map-keys: - - kind - - group - - namespace - - name - x-kubernetes-list-type: map - signatureStores: - description: "signatureStores contains the upstream URIs to verify - release signatures and optional reference to a config map by name - containing the PEM-encoded CA bundle. \n By default, CVO will use - existing signature stores if this property is empty. The CVO will - check the release signatures in the local ConfigMaps first. 
It will - search for a valid signature in these stores in parallel only when - local ConfigMaps did not include a valid signature. Validation will - fail if none of the signature stores reply with valid signature - before timeout. Setting signatureStores will replace the default - signature stores with custom signature stores. Default stores can - be used with custom signature stores by adding them manually. \n - A maximum of 32 signature stores may be configured." - items: - description: SignatureStore represents the URL of custom Signature - Store - properties: - ca: - description: ca is an optional reference to a config map by - name containing the PEM-encoded CA bundle. It is used as a - trust anchor to validate the TLS certificate presented by - the remote server. The key "ca.crt" is used to locate the - data. If specified and the config map or expected key is not - found, the signature store is not honored. If the specified - ca data is not valid, the signature store is not honored. - If empty, we fall back to the CA configured via Proxy, which - is appended to the default system roots. The namespace for - this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - url: - description: url contains the upstream custom signature store - URL. url should be a valid absolute http/https URI of an upstream - signature store as per rfc1738. This must be provided and - cannot be empty. - type: string - x-kubernetes-validations: - - message: url must be a valid absolute URL - rule: isURL(self) - required: - - url - type: object - maxItems: 32 - type: array - x-kubernetes-list-map-keys: - - url - x-kubernetes-list-type: map - upstream: - description: upstream may be used to specify the preferred update - server. By default it will use the appropriate update server for - the cluster and region. - type: string - required: - - clusterID - type: object - status: - description: status contains information about the available updates and - any in-progress updates. - properties: - availableUpdates: - description: availableUpdates contains updates recommended for this - cluster. Updates which appear in conditionalUpdates but not in availableUpdates - may expose this cluster to known issues. This list may be empty - if no updates are recommended, if the update service is unavailable, - or if an invalid channel has been specified. - items: - description: Release represents an OpenShift release image and associated - metadata. - properties: - channels: - description: channels is the set of Cincinnati channels to which - the release currently belongs. - items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is optional - if version is specified and the availableUpdates field contains - a matching version. - type: string - url: - description: url contains information about this release. This - URL is set by the 'url' metadata property on a release or - the metadata returned by the update API and should be displayed - as a link in user interfaces. The URL field may not be set - for test or nightly releases. - type: string - version: - description: version is a semantic version identifying the update - version. When this field is part of spec, version is optional - if image is specified. 
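
A hedged sketch of the TechPreview-only signatureStores field described above, pointing release-signature verification at a custom store fronted by a private CA (the URL and ConfigMap name are hypothetical; the ConfigMap lives in openshift-config and must carry the ca.crt key):

    spec:
      signatureStores:
      - url: https://mirror.example.com/signatures      # custom store; setting this replaces the default stores
        ca:
          name: mirror-ca-bundle                        # optional ConfigMap in openshift-config, key "ca.crt"
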
- type: string - type: object - nullable: true - type: array - x-kubernetes-list-type: atomic - capabilities: - description: capabilities describes the state of optional, core cluster - components. - properties: - enabledCapabilities: - description: enabledCapabilities lists all the capabilities that - are currently managed. - items: - description: ClusterVersionCapability enumerates optional, core - cluster components. - enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - knownCapabilities: - description: knownCapabilities lists all the capabilities known - to the current cluster. - items: - description: ClusterVersionCapability enumerates optional, core - cluster components. - enum: - - openshift-samples - - baremetal - - marketplace - - Console - - Insights - - Storage - - CSISnapshot - - NodeTuning - - MachineAPI - - Build - - DeploymentConfig - - ImageRegistry - - OperatorLifecycleManager - - CloudCredential - - Ingress - - CloudControllerManager - type: string - type: array - x-kubernetes-list-type: atomic - type: object - conditionalUpdates: - description: conditionalUpdates contains the list of updates that - may be recommended for this cluster if it meets specific required - conditions. Consumers interested in the set of updates that are - actually recommended for this cluster should use availableUpdates. - This list may be empty if no updates are recommended, if the update - service is unavailable, or if an empty or invalid channel has been - specified. - items: - description: ConditionalUpdate represents an update which is recommended - to some clusters on the version the current cluster is reconciling, - but which may not be recommended for the current cluster. - properties: - conditions: - description: 'conditions represents the observations of the - conditional update''s current status. Known types are: * Recommended, - for whether the update is recommended for the current cluster.' - items: - description: "Condition contains details for one aspect of - the current state of this API Resource. --- This struct - is intended for direct use as an array at the field path - .status.conditions. For example, \n type FooStatus struct{ - // Represents the observations of a foo's current state. - // Known .status.conditions.type are: \"Available\", \"Progressing\", - and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields - }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should - be when the underlying condition changed. If that is - not known, then using the time when the API field changed - is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. 
For instance, - if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the - current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier - indicating the reason for the condition's last transition. - Producers of specific condition types may define expected - values and meanings for this field, and whether the - values are considered a guaranteed API. The value should - be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across - resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability - to deconflict is important. The regex it matches is - (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - release: - description: release is the target of the update. - properties: - channels: - description: channels is the set of Cincinnati channels - to which the release currently belongs. - items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is - optional if version is specified and the availableUpdates - field contains a matching version. - type: string - url: - description: url contains information about this release. - This URL is set by the 'url' metadata property on a release - or the metadata returned by the update API and should - be displayed as a link in user interfaces. The URL field - may not be set for test or nightly releases. - type: string - version: - description: version is a semantic version identifying the - update version. When this field is part of spec, version - is optional if image is specified. - type: string - type: object - risks: - description: risks represents the range of issues associated - with updating to the target release. The cluster-version operator - will evaluate all entries, and only recommend the update if - there is at least one entry and all entries recommend the - update. - items: - description: ConditionalUpdateRisk represents a reason and - cluster-state for not recommending a conditional update. - properties: - matchingRules: - description: matchingRules is a slice of conditions for - deciding which clusters match the risk and which do - not. The slice is ordered by decreasing precedence. - The cluster-version operator will walk the slice in - order, and stop after the first it can successfully - evaluate. If no condition can be successfully evaluated, - the update will not be recommended. - items: - description: ClusterCondition is a union of typed cluster - conditions. The 'type' property determines which - of the type-specific properties are relevant. 
When - evaluated on a cluster, the condition may match, not - match, or fail to evaluate. - properties: - promql: - description: promQL represents a cluster condition - based on PromQL. - properties: - promql: - description: PromQL is a PromQL query classifying - clusters. This query query should return a - 1 in the match case and a 0 in the does-not-match - case. Queries which return no time series, - or which return values besides 0 or 1, are - evaluation failures. - type: string - required: - - promql - type: object - type: - description: type represents the cluster-condition - type. This defines the members and semantics of - any additional properties. - enum: - - Always - - PromQL - type: string - required: - - type - type: object - minItems: 1 - type: array - x-kubernetes-list-type: atomic - message: - description: message provides additional information about - the risk of updating, in the event that matchingRules - match the cluster state. This is only to be consumed - by humans. It may contain Line Feed characters (U+000A), - which should be rendered as new lines. - minLength: 1 - type: string - name: - description: name is the CamelCase reason for not recommending - a conditional update, in the event that matchingRules - match the cluster state. - minLength: 1 - type: string - url: - description: url contains information about this risk. - format: uri - minLength: 1 - type: string - required: - - matchingRules - - message - - name - - url - type: object - minItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - required: - - release - - risks - type: object - type: array - x-kubernetes-list-type: atomic - conditions: - description: conditions provides information about the cluster version. - The condition "Available" is set to true if the desiredUpdate has - been reached. The condition "Progressing" is set to true if an update - is being applied. The condition "Degraded" is set to true if an - update is currently blocked by a temporary or permanent error. Conditions - are only valid for the current desiredUpdate when metadata.generation - is equal to status.generation. - items: - description: ClusterOperatorStatusCondition represents the state - of the operator's managed and monitored components. - properties: - lastTransitionTime: - description: lastTransitionTime is the time of the last update - to the current status property. - format: date-time - type: string - message: - description: message provides additional information about the - current condition. This is only to be consumed by humans. It - may contain Line Feed characters (U+000A), which should be - rendered as new lines. - type: string - reason: - description: reason is the CamelCase reason for the condition's - current status. - type: string - status: - description: status of the condition, one of True, False, Unknown. - type: string - type: - description: type specifies the aspect reported by this condition. - type: string - required: - - lastTransitionTime - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - desired: - description: desired is the version that the cluster is reconciling - towards. If the cluster is not yet fully initialized desired will - be set with the information available, which may be an image or - a tag. - properties: - channels: - description: channels is the set of Cincinnati channels to which - the release currently belongs. 
- items: - type: string - type: array - x-kubernetes-list-type: set - image: - description: image is a container image location that contains - the update. When this field is part of spec, image is optional - if version is specified and the availableUpdates field contains - a matching version. - type: string - url: - description: url contains information about this release. This - URL is set by the 'url' metadata property on a release or the - metadata returned by the update API and should be displayed - as a link in user interfaces. The URL field may not be set for - test or nightly releases. - type: string - version: - description: version is a semantic version identifying the update - version. When this field is part of spec, version is optional - if image is specified. - type: string - type: object - history: - description: history contains a list of the most recent versions applied - to the cluster. This value may be empty during cluster startup, - and then will be updated when a new update is being applied. The - newest update is first in the list and it is ordered by recency. - Updates in the history have state Completed if the rollout completed - - if an update was failing or halfway applied the state will be - Partial. Only a limited amount of update history is preserved. - items: - description: UpdateHistory is a single attempted update to the cluster. - properties: - acceptedRisks: - description: acceptedRisks records risks which were accepted - to initiate the update. For example, it may menition an Upgradeable=False - or missing signature that was overriden via desiredUpdate.force, - or an update that was initiated despite not being in the availableUpdates - set of recommended update targets. - type: string - completionTime: - description: completionTime, if set, is when the update was - fully applied. The update that is currently being applied - will have a null completion time. Completion time will always - be set for entries that are not the current update (usually - to the started time of the next update). - format: date-time - nullable: true - type: string - image: - description: image is a container image location that contains - the update. This value is always populated. - type: string - startedTime: - description: startedTime is the time at which the update was - started. - format: date-time - type: string - state: - description: state reflects whether the update was fully applied. - The Partial state indicates the update is not fully applied, - while the Completed state indicates the update was successfully - rolled out at least once (all parts of the update successfully - applied). - type: string - verified: - description: verified indicates whether the provided update - was properly verified before it was installed. If this is - false the cluster may not be trusted. Verified does not cover - upgradeable checks that depend on the cluster state at the - time when the update target was accepted. - type: boolean - version: - description: version is a semantic version identifying the update - version. If the requested image does not define a version, - or if a failure occurs retrieving the image, this value may - be empty. - type: string - required: - - completionTime - - image - - startedTime - - state - - verified - type: object - type: array - x-kubernetes-list-type: atomic - observedGeneration: - description: observedGeneration reports which version of the spec - is being synced. 
If this value is not equal to metadata.generation, - then the desired and conditions fields may represent a previous - version. - format: int64 - type: integer - versionHash: - description: versionHash is a fingerprint of the content that the - cluster will be updated with. It is used by the operator to avoid - unnecessary work and is for internal use only. - type: string - required: - - availableUpdates - - desired - - observedGeneration - - versionHash - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: the `baremetal` capability requires the `MachineAPI` capability, - which is neither explicitly or implicitly enabled in this cluster, please - enable the `MachineAPI` capability - rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) - && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''baremetal'' - in self.spec.capabilities.additionalEnabledCapabilities ? ''MachineAPI'' - in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) - && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) - && ''MachineAPI'' in self.status.capabilities.enabledCapabilities) : true' - - message: the `marketplace` capability requires the `OperatorLifecycleManager` - capability, which is neither explicitly or implicitly enabled in this - cluster, please enable the `OperatorLifecycleManager` capability - rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) - && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''marketplace'' - in self.spec.capabilities.additionalEnabledCapabilities ? ''OperatorLifecycleManager'' - in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) - && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) - && ''OperatorLifecycleManager'' in self.status.capabilities.enabledCapabilities) - : true' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml deleted file mode 100644 index b9cf439c5..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml +++ /dev/null @@ -1,106 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: proxies.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Proxy - listKind: ProxyList - plural: proxies - singular: proxy - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Proxy holds cluster-wide information on how to configure default - proxies for the cluster. The canonical name is `cluster` \n Compatibility - level 1: Stable within a major release for a minimum of 12 months or 3 minor - releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec holds user-settable values for the proxy configuration - properties: - httpProxy: - description: httpProxy is the URL of the proxy for HTTP requests. Empty - means unset and will not result in an env var. - type: string - httpsProxy: - description: httpsProxy is the URL of the proxy for HTTPS requests. Empty - means unset and will not result in an env var. - type: string - noProxy: - description: noProxy is a comma-separated list of hostnames and/or - CIDRs and/or IPs for which the proxy should not be used. Empty means - unset and will not result in an env var. - type: string - readinessEndpoints: - description: readinessEndpoints is a list of endpoints used to verify - readiness of the proxy. - items: - type: string - type: array - trustedCA: - description: "trustedCA is a reference to a ConfigMap containing a - CA certificate bundle. The trustedCA field should only be consumed - by a proxy validator. The validator is responsible for reading the - certificate bundle from the required key \"ca-bundle.crt\", merging - it with the system default trust bundle, and writing the merged - trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" - namespace. Clients that expect to make proxy connections must use - the trusted-ca-bundle for all HTTPS requests to the proxy, and may - use the trusted-ca-bundle for non-proxy HTTPS requests as well. - \n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". - Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: - ConfigMap metadata: name: user-ca-bundle namespace: openshift-config - data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate - bundle. -----END CERTIFICATE-----" - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - httpProxy: - description: httpProxy is the URL of the proxy for HTTP requests. - type: string - httpsProxy: - description: httpsProxy is the URL of the proxy for HTTPS requests. - type: string - noProxy: - description: noProxy is a comma-separated list of hostnames and/or - CIDRs for which the proxy should not be used. 
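
Tying the Proxy spec fields above together, a typical hypothetical cluster-wide proxy configuration would be:

    apiVersion: config.openshift.io/v1
    kind: Proxy
    metadata:
      name: cluster                    # canonical name, per the description above
    spec:
      httpProxy: http://proxy.example.com:3128
      httpsProxy: http://proxy.example.com:3128
      noProxy: .cluster.local,.svc,10.0.0.0/16
      trustedCA:
        name: user-ca-bundle           # ConfigMap in openshift-config with key "ca-bundle.crt"
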
- type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_marketplace-operator_01_operatorhub.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_marketplace-operator_01_operatorhub.crd.yaml deleted file mode 100644 index cc42ea290..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_03_marketplace-operator_01_operatorhub.crd.yaml +++ /dev/null @@ -1,109 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - capability.openshift.io/name: marketplace - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: operatorhubs.config.openshift.io -spec: - group: config.openshift.io - names: - kind: OperatorHub - listKind: OperatorHubList - plural: operatorhubs - singular: operatorhub - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "OperatorHub is the Schema for the operatorhubs API. It can be - used to change the state of the default hub sources for OperatorHub on the - cluster from enabled to disabled and vice versa. \n Compatibility level - 1: Stable within a major release for a minimum of 12 months or 3 minor releases - (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: OperatorHubSpec defines the desired state of OperatorHub - properties: - disableAllDefaultSources: - description: disableAllDefaultSources allows you to disable all the - default hub sources. If this is true, a specific entry in sources - can be used to enable a default source. If this is false, a specific - entry in sources can be used to disable or enable a default source. - type: boolean - sources: - description: sources is the list of default hub sources and their - configuration. If the list is empty, it implies that the default - hub sources are enabled on the cluster unless disableAllDefaultSources - is true. If disableAllDefaultSources is true and sources is not - empty, the configuration present in sources will take precedence. - The list of default hub sources and their current state will always - be reflected in the status block. 
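
As a usage sketch for the OperatorHub spec described above (the source name is hypothetical but follows the default-source naming), disabling all default hub sources and then re-enabling a single one looks like:

    apiVersion: config.openshift.io/v1
    kind: OperatorHub
    metadata:
      name: cluster
    spec:
      disableAllDefaultSources: true   # turn every default source off...
      sources:
      - name: redhat-operators         # ...then re-enable one by name
        disabled: false
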
- items: - description: HubSource is used to specify the hub source and its - configuration - properties: - disabled: - description: disabled is used to disable a default hub source - on cluster - type: boolean - name: - description: name is the name of one of the default hub sources - maxLength: 253 - minLength: 1 - type: string - type: object - type: array - type: object - status: - description: OperatorHubStatus defines the observed state of OperatorHub. - The current state of the default hub sources will always be reflected - here. - properties: - sources: - description: sources encapsulates the result of applying the configuration - for each hub source - items: - description: HubSourceStatus is used to reflect the current state - of applying the configuration to a default source - properties: - disabled: - description: disabled is used to disable a default hub source - on cluster - type: boolean - message: - description: message provides more information regarding failures - type: string - name: - description: name is the name of one of the default hub sources - maxLength: 253 - minLength: 1 - type: string - status: - description: status indicates success or failure in applying - the configuration - type: string - type: object - type: array - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml deleted file mode 100644 index aeefb69bc..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,315 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: apiservers.config.openshift.io -spec: - group: config.openshift.io - names: - kind: APIServer - listKind: APIServerList - plural: apiservers - singular: apiserver - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "APIServer holds configuration (like serving certificates, client - CA and CORS domains) shared by all API servers in the system, among them - especially kube-apiserver and openshift-apiserver. The canonical name of - an instance is 'cluster'. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - additionalCORSAllowedOrigins: - description: additionalCORSAllowedOrigins lists additional, user-defined - regular expressions describing hosts for which the API server allows - access using the CORS headers. This may be needed to access the - API and the integrated OAuth server from JavaScript applications. - The values are regular expressions that correspond to the Golang - regular expression language. - items: - type: string - type: array - audit: - default: - profile: Default - description: audit specifies the settings for audit configuration - to be applied to all OpenShift-provided API servers in the cluster. - properties: - customRules: - description: customRules specify profiles per group. These profile - take precedence over the top-level profile field if they apply. - They are evaluation from top to bottom and the first one that - matches, applies. - items: - description: AuditCustomRule describes a custom rule for an - audit profile that takes precedence over the top-level profile. - properties: - group: - description: group is a name of group a request user must - be member of in order to this profile to apply. - minLength: 1 - type: string - profile: - description: "profile specifies the name of the desired - audit policy configuration to be deployed to all OpenShift-provided - API servers in the cluster. \n The following profiles - are provided: - Default: the existing default policy. - - WriteRequestBodies: like 'Default', but logs request - and response HTTP payloads for write requests (create, - update, patch). - AllRequestBodies: like 'WriteRequestBodies', - but also logs request and response HTTP payloads for read - requests (get, list). - None: no requests are logged at - all, not even oauthaccesstokens and oauthauthorizetokens. - \n If unset, the 'Default' profile is used as the default." - enum: - - Default - - WriteRequestBodies - - AllRequestBodies - - None - type: string - required: - - group - - profile - type: object - type: array - x-kubernetes-list-map-keys: - - group - x-kubernetes-list-type: map - profile: - default: Default - description: "profile specifies the name of the desired top-level - audit profile to be applied to all requests sent to any of the - OpenShift-provided API servers in the cluster (kube-apiserver, - openshift-apiserver and oauth-apiserver), with the exception - of those requests that match one or more of the customRules. - \n The following profiles are provided: - Default: default policy - which means MetaData level logging with the exception of events - (not logged at all), oauthaccesstokens and oauthauthorizetokens - (both logged at RequestBody level). - WriteRequestBodies: like - 'Default', but logs request and response HTTP payloads for write - requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', - but also logs request and response HTTP payloads for read requests - (get, list). - None: no requests are logged at all, not even - oauthaccesstokens and oauthauthorizetokens. \n Warning: It is - not recommended to disable audit logging by using the `None` - profile unless you are fully aware of the risks of not logging - data that can be beneficial when troubleshooting issues. 
If - you disable audit logging and a support situation arises, you - might need to enable audit logging and reproduce the issue in - order to troubleshoot properly. \n If unset, the 'Default' profile - is used as the default." - enum: - - Default - - WriteRequestBodies - - AllRequestBodies - - None - type: string - type: object - clientCA: - description: 'clientCA references a ConfigMap containing a certificate - bundle for the signers that will be recognized for incoming client - certificates in addition to the operator managed signers. If this - is empty, then only operator managed signers are valid. You usually - only have to set this if you have your own PKI you wish to honor - client certificates from. The ConfigMap must exist in the openshift-config - namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] - - CA bundle.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - encryption: - description: encryption allows the configuration of encryption of - resources at the datastore layer. - properties: - type: - description: "type defines what encryption type should be used - to encrypt resources at the datastore layer. When this field - is unset (i.e. when it is set to the empty string), identity - is implied. The behavior of unset can and will change over time. - \ Even if encryption is enabled by default, the meaning of unset - may change to a different encryption type based on changes in - best practices. \n When encryption is enabled, all sensitive - resources shipped with the platform are encrypted. This list - of sensitive resources can and will change over time. The current - authoritative list is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io - 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io" - enum: - - "" - - identity - - aescbc - - aesgcm - type: string - type: object - servingCerts: - description: servingCert is the TLS cert info for serving secure traffic. - If not specified, operator managed certificates will be used for - serving secure traffic. - properties: - namedCertificates: - description: namedCertificates references secrets containing the - TLS cert info for serving secure traffic to specific hostnames. - If no named certificates are provided, or no named certificates - match the server name as understood by a client, the defaultServingCertificate - will be used. - items: - description: APIServerNamedServingCert maps a server DNS name, - as understood by a client, to a certificate. - properties: - names: - description: names is a optional list of explicit DNS names - (leading wildcards allowed) that should use this certificate - to serve secure traffic. If no names are provided, the - implicit names will be extracted from the certificates. - Exact names trump over wildcard names. Explicit names - defined here trump over extracted implicit names. - items: - type: string - type: array - servingCertificate: - description: 'servingCertificate references a kubernetes.io/tls - type secret containing the TLS cert info for serving secure - traffic. The secret must exist in the openshift-config - namespace and contain the following required fields: - - Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] - - TLS certificate.' 
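# --- Illustrative example (not part of this patch): an APIServer resource
# exercising the audit schema deleted above. Profile names come from the
# CRD's own enum; the group value is a hypothetical placeholder. Custom
# rules are evaluated top to bottom and the first match takes precedence
# over the top-level profile.
apiVersion: config.openshift.io/v1
kind: APIServer
metadata:
  name: cluster                           # the canonical instance name per the CRD description
spec:
  audit:
    profile: Default                      # Default | WriteRequestBodies | AllRequestBodies | None
    customRules:
    - group: system:authenticated:oauth   # hypothetical group; its members get this rule's profile
      profile: WriteRequestBodies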
- properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - type: object - tlsSecurityProfile: - description: "tlsSecurityProfile specifies settings for TLS connections - for externally exposed servers. \n If unset, a default (which may - change between releases) is chosen. Note that only Old, Intermediate - and Custom profiles are currently supported, and the maximum available - minTLSVersion is VersionTLS12." - properties: - custom: - description: "custom is a user-defined TLS security profile. Be - extremely careful using a custom profile as invalid configurations - can be catastrophic. An example custom profile looks like this: - \n ciphers: \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - ECDHE-RSA-AES128-GCM-SHA256 \n - ECDHE-ECDSA-AES128-GCM-SHA256 - \n minTLSVersion: VersionTLS11" - nullable: true - properties: - ciphers: - description: "ciphers is used to specify the cipher algorithms - that are negotiated during the TLS handshake. Operators - may remove entries their operands do not support. For example, - to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" - items: - type: string - type: array - minTLSVersion: - description: "minTLSVersion is used to specify the minimal - version of the TLS protocol that is negotiated during the - TLS handshake. For example, to use TLS versions 1.1, 1.2 - and 1.3 (yaml): \n minTLSVersion: VersionTLS11 \n NOTE: - currently the highest minTLSVersion allowed is VersionTLS12" - enum: - - VersionTLS10 - - VersionTLS11 - - VersionTLS12 - - VersionTLS13 - type: string - type: object - intermediate: - description: "intermediate is a TLS security profile based on: - \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n - ECDHE-ECDSA-AES128-GCM-SHA256 \n - ECDHE-RSA-AES128-GCM-SHA256 - \n - ECDHE-ECDSA-AES256-GCM-SHA384 \n - ECDHE-RSA-AES256-GCM-SHA384 - \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - DHE-RSA-AES128-GCM-SHA256 \n - DHE-RSA-AES256-GCM-SHA384 - \n minTLSVersion: VersionTLS12" - nullable: true - type: object - modern: - description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n minTLSVersion: VersionTLS13" - nullable: true - type: object - old: - description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n - ECDHE-ECDSA-AES128-GCM-SHA256 \n - ECDHE-RSA-AES128-GCM-SHA256 - \n - ECDHE-ECDSA-AES256-GCM-SHA384 \n - ECDHE-RSA-AES256-GCM-SHA384 - \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - DHE-RSA-AES128-GCM-SHA256 \n - DHE-RSA-AES256-GCM-SHA384 - \n - DHE-RSA-CHACHA20-POLY1305 \n - ECDHE-ECDSA-AES128-SHA256 - \n - ECDHE-RSA-AES128-SHA256 \n - ECDHE-ECDSA-AES128-SHA \n - - ECDHE-RSA-AES128-SHA \n - ECDHE-ECDSA-AES256-SHA384 \n - ECDHE-RSA-AES256-SHA384 - \n - ECDHE-ECDSA-AES256-SHA \n - ECDHE-RSA-AES256-SHA \n - DHE-RSA-AES128-SHA256 - \n - 
DHE-RSA-AES256-SHA256 \n - AES128-GCM-SHA256 \n - AES256-GCM-SHA384 - \n - AES128-SHA256 \n - AES256-SHA256 \n - AES128-SHA \n - AES256-SHA - \n - DES-CBC3-SHA \n minTLSVersion: VersionTLS10" - nullable: true - type: object - type: - description: "type is one of Old, Intermediate, Modern or Custom. - Custom provides the ability to specify individual TLS security - profile parameters. Old, Intermediate and Modern are TLS security - profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations - \n The profiles are intent based, so they may change over time - as new ciphers are developed and existing ciphers are found - to be insecure. Depending on precisely which ciphers are available - to a process, the list may be reduced. \n Note that the Modern - profile is currently not supported because it is not yet well - adopted by common software libraries." - enum: - - Old - - Intermediate - - Modern - - Custom - type: string - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml deleted file mode 100644 index f4445e768..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml +++ /dev/null @@ -1,315 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: Default - name: apiservers.config.openshift.io -spec: - group: config.openshift.io - names: - kind: APIServer - listKind: APIServerList - plural: apiservers - singular: apiserver - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "APIServer holds configuration (like serving certificates, client - CA and CORS domains) shared by all API servers in the system, among them - especially kube-apiserver and openshift-apiserver. The canonical name of - an instance is 'cluster'. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - additionalCORSAllowedOrigins: - description: additionalCORSAllowedOrigins lists additional, user-defined - regular expressions describing hosts for which the API server allows - access using the CORS headers. This may be needed to access the - API and the integrated OAuth server from JavaScript applications. - The values are regular expressions that correspond to the Golang - regular expression language. - items: - type: string - type: array - audit: - default: - profile: Default - description: audit specifies the settings for audit configuration - to be applied to all OpenShift-provided API servers in the cluster. - properties: - customRules: - description: customRules specify profiles per group. These profile - take precedence over the top-level profile field if they apply. - They are evaluation from top to bottom and the first one that - matches, applies. - items: - description: AuditCustomRule describes a custom rule for an - audit profile that takes precedence over the top-level profile. - properties: - group: - description: group is a name of group a request user must - be member of in order to this profile to apply. - minLength: 1 - type: string - profile: - description: "profile specifies the name of the desired - audit policy configuration to be deployed to all OpenShift-provided - API servers in the cluster. \n The following profiles - are provided: - Default: the existing default policy. - - WriteRequestBodies: like 'Default', but logs request - and response HTTP payloads for write requests (create, - update, patch). - AllRequestBodies: like 'WriteRequestBodies', - but also logs request and response HTTP payloads for read - requests (get, list). - None: no requests are logged at - all, not even oauthaccesstokens and oauthauthorizetokens. - \n If unset, the 'Default' profile is used as the default." - enum: - - Default - - WriteRequestBodies - - AllRequestBodies - - None - type: string - required: - - group - - profile - type: object - type: array - x-kubernetes-list-map-keys: - - group - x-kubernetes-list-type: map - profile: - default: Default - description: "profile specifies the name of the desired top-level - audit profile to be applied to all requests sent to any of the - OpenShift-provided API servers in the cluster (kube-apiserver, - openshift-apiserver and oauth-apiserver), with the exception - of those requests that match one or more of the customRules. - \n The following profiles are provided: - Default: default policy - which means MetaData level logging with the exception of events - (not logged at all), oauthaccesstokens and oauthauthorizetokens - (both logged at RequestBody level). - WriteRequestBodies: like - 'Default', but logs request and response HTTP payloads for write - requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', - but also logs request and response HTTP payloads for read requests - (get, list). - None: no requests are logged at all, not even - oauthaccesstokens and oauthauthorizetokens. \n Warning: It is - not recommended to disable audit logging by using the `None` - profile unless you are fully aware of the risks of not logging - data that can be beneficial when troubleshooting issues. 
If - you disable audit logging and a support situation arises, you - might need to enable audit logging and reproduce the issue in - order to troubleshoot properly. \n If unset, the 'Default' profile - is used as the default." - enum: - - Default - - WriteRequestBodies - - AllRequestBodies - - None - type: string - type: object - clientCA: - description: 'clientCA references a ConfigMap containing a certificate - bundle for the signers that will be recognized for incoming client - certificates in addition to the operator managed signers. If this - is empty, then only operator managed signers are valid. You usually - only have to set this if you have your own PKI you wish to honor - client certificates from. The ConfigMap must exist in the openshift-config - namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] - - CA bundle.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - encryption: - description: encryption allows the configuration of encryption of - resources at the datastore layer. - properties: - type: - description: "type defines what encryption type should be used - to encrypt resources at the datastore layer. When this field - is unset (i.e. when it is set to the empty string), identity - is implied. The behavior of unset can and will change over time. - \ Even if encryption is enabled by default, the meaning of unset - may change to a different encryption type based on changes in - best practices. \n When encryption is enabled, all sensitive - resources shipped with the platform are encrypted. This list - of sensitive resources can and will change over time. The current - authoritative list is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io - 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io" - enum: - - "" - - identity - - aescbc - - aesgcm - type: string - type: object - servingCerts: - description: servingCert is the TLS cert info for serving secure traffic. - If not specified, operator managed certificates will be used for - serving secure traffic. - properties: - namedCertificates: - description: namedCertificates references secrets containing the - TLS cert info for serving secure traffic to specific hostnames. - If no named certificates are provided, or no named certificates - match the server name as understood by a client, the defaultServingCertificate - will be used. - items: - description: APIServerNamedServingCert maps a server DNS name, - as understood by a client, to a certificate. - properties: - names: - description: names is a optional list of explicit DNS names - (leading wildcards allowed) that should use this certificate - to serve secure traffic. If no names are provided, the - implicit names will be extracted from the certificates. - Exact names trump over wildcard names. Explicit names - defined here trump over extracted implicit names. - items: - type: string - type: array - servingCertificate: - description: 'servingCertificate references a kubernetes.io/tls - type secret containing the TLS cert info for serving secure - traffic. The secret must exist in the openshift-config - namespace and contain the following required fields: - - Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] - - TLS certificate.' 
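# --- Illustrative example (not part of this patch): a Custom
# tlsSecurityProfile, mirroring the sample embedded in the schema above.
# The cipher list and minTLSVersion are the CRD's own illustration, not a
# hardening recommendation; note the schema caps the usable minTLSVersion
# at VersionTLS12 and does not yet support the Modern profile.
apiVersion: config.openshift.io/v1
kind: APIServer
metadata:
  name: cluster
spec:
  tlsSecurityProfile:
    type: Custom                          # Old | Intermediate | Modern | Custom
    custom:
      ciphers:
      - ECDHE-ECDSA-CHACHA20-POLY1305
      - ECDHE-RSA-CHACHA20-POLY1305
      - ECDHE-RSA-AES128-GCM-SHA256
      - ECDHE-ECDSA-AES128-GCM-SHA256
      minTLSVersion: VersionTLS11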
- properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - type: object - tlsSecurityProfile: - description: "tlsSecurityProfile specifies settings for TLS connections - for externally exposed servers. \n If unset, a default (which may - change between releases) is chosen. Note that only Old, Intermediate - and Custom profiles are currently supported, and the maximum available - minTLSVersion is VersionTLS12." - properties: - custom: - description: "custom is a user-defined TLS security profile. Be - extremely careful using a custom profile as invalid configurations - can be catastrophic. An example custom profile looks like this: - \n ciphers: \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - ECDHE-RSA-AES128-GCM-SHA256 \n - ECDHE-ECDSA-AES128-GCM-SHA256 - \n minTLSVersion: VersionTLS11" - nullable: true - properties: - ciphers: - description: "ciphers is used to specify the cipher algorithms - that are negotiated during the TLS handshake. Operators - may remove entries their operands do not support. For example, - to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" - items: - type: string - type: array - minTLSVersion: - description: "minTLSVersion is used to specify the minimal - version of the TLS protocol that is negotiated during the - TLS handshake. For example, to use TLS versions 1.1, 1.2 - and 1.3 (yaml): \n minTLSVersion: VersionTLS11 \n NOTE: - currently the highest minTLSVersion allowed is VersionTLS12" - enum: - - VersionTLS10 - - VersionTLS11 - - VersionTLS12 - - VersionTLS13 - type: string - type: object - intermediate: - description: "intermediate is a TLS security profile based on: - \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n - ECDHE-ECDSA-AES128-GCM-SHA256 \n - ECDHE-RSA-AES128-GCM-SHA256 - \n - ECDHE-ECDSA-AES256-GCM-SHA384 \n - ECDHE-RSA-AES256-GCM-SHA384 - \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - DHE-RSA-AES128-GCM-SHA256 \n - DHE-RSA-AES256-GCM-SHA384 - \n minTLSVersion: VersionTLS12" - nullable: true - type: object - modern: - description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n minTLSVersion: VersionTLS13" - nullable: true - type: object - old: - description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n - ECDHE-ECDSA-AES128-GCM-SHA256 \n - ECDHE-RSA-AES128-GCM-SHA256 - \n - ECDHE-ECDSA-AES256-GCM-SHA384 \n - ECDHE-RSA-AES256-GCM-SHA384 - \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - DHE-RSA-AES128-GCM-SHA256 \n - DHE-RSA-AES256-GCM-SHA384 - \n - DHE-RSA-CHACHA20-POLY1305 \n - ECDHE-ECDSA-AES128-SHA256 - \n - ECDHE-RSA-AES128-SHA256 \n - ECDHE-ECDSA-AES128-SHA \n - - ECDHE-RSA-AES128-SHA \n - ECDHE-ECDSA-AES256-SHA384 \n - ECDHE-RSA-AES256-SHA384 - \n - ECDHE-ECDSA-AES256-SHA \n - ECDHE-RSA-AES256-SHA \n - DHE-RSA-AES128-SHA256 - \n - 
DHE-RSA-AES256-SHA256 \n - AES128-GCM-SHA256 \n - AES256-GCM-SHA384 - \n - AES128-SHA256 \n - AES256-SHA256 \n - AES128-SHA \n - AES256-SHA - \n - DES-CBC3-SHA \n minTLSVersion: VersionTLS10" - nullable: true - type: object - type: - description: "type is one of Old, Intermediate, Modern or Custom. - Custom provides the ability to specify individual TLS security - profile parameters. Old, Intermediate and Modern are TLS security - profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations - \n The profiles are intent based, so they may change over time - as new ciphers are developed and existing ciphers are found - to be insecure. Depending on precisely which ciphers are available - to a process, the list may be reduced. \n Note that the Modern - profile is currently not supported because it is not yet well - adopted by common software libraries." - enum: - - Old - - Intermediate - - Modern - - Custom - type: string - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index 37b054d19..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,315 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: apiservers.config.openshift.io -spec: - group: config.openshift.io - names: - kind: APIServer - listKind: APIServerList - plural: apiservers - singular: apiserver - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "APIServer holds configuration (like serving certificates, client - CA and CORS domains) shared by all API servers in the system, among them - especially kube-apiserver and openshift-apiserver. The canonical name of - an instance is 'cluster'. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - additionalCORSAllowedOrigins: - description: additionalCORSAllowedOrigins lists additional, user-defined - regular expressions describing hosts for which the API server allows - access using the CORS headers. This may be needed to access the - API and the integrated OAuth server from JavaScript applications. - The values are regular expressions that correspond to the Golang - regular expression language. - items: - type: string - type: array - audit: - default: - profile: Default - description: audit specifies the settings for audit configuration - to be applied to all OpenShift-provided API servers in the cluster. - properties: - customRules: - description: customRules specify profiles per group. These profile - take precedence over the top-level profile field if they apply. - They are evaluation from top to bottom and the first one that - matches, applies. - items: - description: AuditCustomRule describes a custom rule for an - audit profile that takes precedence over the top-level profile. - properties: - group: - description: group is a name of group a request user must - be member of in order to this profile to apply. - minLength: 1 - type: string - profile: - description: "profile specifies the name of the desired - audit policy configuration to be deployed to all OpenShift-provided - API servers in the cluster. \n The following profiles - are provided: - Default: the existing default policy. - - WriteRequestBodies: like 'Default', but logs request - and response HTTP payloads for write requests (create, - update, patch). - AllRequestBodies: like 'WriteRequestBodies', - but also logs request and response HTTP payloads for read - requests (get, list). - None: no requests are logged at - all, not even oauthaccesstokens and oauthauthorizetokens. - \n If unset, the 'Default' profile is used as the default." - enum: - - Default - - WriteRequestBodies - - AllRequestBodies - - None - type: string - required: - - group - - profile - type: object - type: array - x-kubernetes-list-map-keys: - - group - x-kubernetes-list-type: map - profile: - default: Default - description: "profile specifies the name of the desired top-level - audit profile to be applied to all requests sent to any of the - OpenShift-provided API servers in the cluster (kube-apiserver, - openshift-apiserver and oauth-apiserver), with the exception - of those requests that match one or more of the customRules. - \n The following profiles are provided: - Default: default policy - which means MetaData level logging with the exception of events - (not logged at all), oauthaccesstokens and oauthauthorizetokens - (both logged at RequestBody level). - WriteRequestBodies: like - 'Default', but logs request and response HTTP payloads for write - requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', - but also logs request and response HTTP payloads for read requests - (get, list). - None: no requests are logged at all, not even - oauthaccesstokens and oauthauthorizetokens. \n Warning: It is - not recommended to disable audit logging by using the `None` - profile unless you are fully aware of the risks of not logging - data that can be beneficial when troubleshooting issues. 
If - you disable audit logging and a support situation arises, you - might need to enable audit logging and reproduce the issue in - order to troubleshoot properly. \n If unset, the 'Default' profile - is used as the default." - enum: - - Default - - WriteRequestBodies - - AllRequestBodies - - None - type: string - type: object - clientCA: - description: 'clientCA references a ConfigMap containing a certificate - bundle for the signers that will be recognized for incoming client - certificates in addition to the operator managed signers. If this - is empty, then only operator managed signers are valid. You usually - only have to set this if you have your own PKI you wish to honor - client certificates from. The ConfigMap must exist in the openshift-config - namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] - - CA bundle.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - encryption: - description: encryption allows the configuration of encryption of - resources at the datastore layer. - properties: - type: - description: "type defines what encryption type should be used - to encrypt resources at the datastore layer. When this field - is unset (i.e. when it is set to the empty string), identity - is implied. The behavior of unset can and will change over time. - \ Even if encryption is enabled by default, the meaning of unset - may change to a different encryption type based on changes in - best practices. \n When encryption is enabled, all sensitive - resources shipped with the platform are encrypted. This list - of sensitive resources can and will change over time. The current - authoritative list is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io - 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io" - enum: - - "" - - identity - - aescbc - - aesgcm - type: string - type: object - servingCerts: - description: servingCert is the TLS cert info for serving secure traffic. - If not specified, operator managed certificates will be used for - serving secure traffic. - properties: - namedCertificates: - description: namedCertificates references secrets containing the - TLS cert info for serving secure traffic to specific hostnames. - If no named certificates are provided, or no named certificates - match the server name as understood by a client, the defaultServingCertificate - will be used. - items: - description: APIServerNamedServingCert maps a server DNS name, - as understood by a client, to a certificate. - properties: - names: - description: names is a optional list of explicit DNS names - (leading wildcards allowed) that should use this certificate - to serve secure traffic. If no names are provided, the - implicit names will be extracted from the certificates. - Exact names trump over wildcard names. Explicit names - defined here trump over extracted implicit names. - items: - type: string - type: array - servingCertificate: - description: 'servingCertificate references a kubernetes.io/tls - type secret containing the TLS cert info for serving secure - traffic. The secret must exist in the openshift-config - namespace and contain the following required fields: - - Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] - - TLS certificate.' 
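# --- Illustrative example (not part of this patch): datastore-layer
# encryption plus a named serving certificate, per the "encryption" and
# "servingCerts" stanzas above. The hostname and secret name are
# hypothetical; the referenced secret must be a kubernetes.io/tls secret
# in the openshift-config namespace.
apiVersion: config.openshift.io/v1
kind: APIServer
metadata:
  name: cluster
spec:
  encryption:
    type: aescbc                          # "" (identity implied) | identity | aescbc | aesgcm
  servingCerts:
    namedCertificates:
    - names:
      - api.example.com                   # hypothetical DNS name; leading wildcards are allowed
      servingCertificate:
        name: api-example-tls             # hypothetical secret containing tls.key / tls.crt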
- properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - type: object - tlsSecurityProfile: - description: "tlsSecurityProfile specifies settings for TLS connections - for externally exposed servers. \n If unset, a default (which may - change between releases) is chosen. Note that only Old, Intermediate - and Custom profiles are currently supported, and the maximum available - minTLSVersion is VersionTLS12." - properties: - custom: - description: "custom is a user-defined TLS security profile. Be - extremely careful using a custom profile as invalid configurations - can be catastrophic. An example custom profile looks like this: - \n ciphers: \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - ECDHE-RSA-AES128-GCM-SHA256 \n - ECDHE-ECDSA-AES128-GCM-SHA256 - \n minTLSVersion: VersionTLS11" - nullable: true - properties: - ciphers: - description: "ciphers is used to specify the cipher algorithms - that are negotiated during the TLS handshake. Operators - may remove entries their operands do not support. For example, - to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" - items: - type: string - type: array - minTLSVersion: - description: "minTLSVersion is used to specify the minimal - version of the TLS protocol that is negotiated during the - TLS handshake. For example, to use TLS versions 1.1, 1.2 - and 1.3 (yaml): \n minTLSVersion: VersionTLS11 \n NOTE: - currently the highest minTLSVersion allowed is VersionTLS12" - enum: - - VersionTLS10 - - VersionTLS11 - - VersionTLS12 - - VersionTLS13 - type: string - type: object - intermediate: - description: "intermediate is a TLS security profile based on: - \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n - ECDHE-ECDSA-AES128-GCM-SHA256 \n - ECDHE-RSA-AES128-GCM-SHA256 - \n - ECDHE-ECDSA-AES256-GCM-SHA384 \n - ECDHE-RSA-AES256-GCM-SHA384 - \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - DHE-RSA-AES128-GCM-SHA256 \n - DHE-RSA-AES256-GCM-SHA384 - \n minTLSVersion: VersionTLS12" - nullable: true - type: object - modern: - description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n minTLSVersion: VersionTLS13" - nullable: true - type: object - old: - description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility - \n and looks like this (yaml): \n ciphers: \n - TLS_AES_128_GCM_SHA256 - \n - TLS_AES_256_GCM_SHA384 \n - TLS_CHACHA20_POLY1305_SHA256 - \n - ECDHE-ECDSA-AES128-GCM-SHA256 \n - ECDHE-RSA-AES128-GCM-SHA256 - \n - ECDHE-ECDSA-AES256-GCM-SHA384 \n - ECDHE-RSA-AES256-GCM-SHA384 - \n - ECDHE-ECDSA-CHACHA20-POLY1305 \n - ECDHE-RSA-CHACHA20-POLY1305 - \n - DHE-RSA-AES128-GCM-SHA256 \n - DHE-RSA-AES256-GCM-SHA384 - \n - DHE-RSA-CHACHA20-POLY1305 \n - ECDHE-ECDSA-AES128-SHA256 - \n - ECDHE-RSA-AES128-SHA256 \n - ECDHE-ECDSA-AES128-SHA \n - - ECDHE-RSA-AES128-SHA \n - ECDHE-ECDSA-AES256-SHA384 \n - ECDHE-RSA-AES256-SHA384 - \n - ECDHE-ECDSA-AES256-SHA \n - ECDHE-RSA-AES256-SHA \n - DHE-RSA-AES128-SHA256 - \n - 
DHE-RSA-AES256-SHA256 \n - AES128-GCM-SHA256 \n - AES256-GCM-SHA384 - \n - AES128-SHA256 \n - AES256-SHA256 \n - AES128-SHA \n - AES256-SHA - \n - DES-CBC3-SHA \n minTLSVersion: VersionTLS10" - nullable: true - type: object - type: - description: "type is one of Old, Intermediate, Modern or Custom. - Custom provides the ability to specify individual TLS security - profile parameters. Old, Intermediate and Modern are TLS security - profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations - \n The profiles are intent based, so they may change over time - as new ciphers are developed and existing ciphers are found - to be insecure. Depending on precisely which ciphers are available - to a process, the list may be reduced. \n Note that the Modern - profile is currently not supported because it is not yet well - adopted by common software libraries." - enum: - - Old - - Intermediate - - Modern - - Custom - type: string - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml deleted file mode 100644 index 6d8a5da7e..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml +++ /dev/null @@ -1,554 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: authentications.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Authentication - listKind: AuthenticationList - plural: authentications - singular: authentication - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Authentication specifies cluster-wide settings for authentication - (like OAuth and webhook token authenticators). The canonical name of an - instance is `cluster`. \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - oauthMetadata: - description: 'oauthMetadata contains the discovery endpoint data for - OAuth 2.0 Authorization Server Metadata for an external OAuth server. - This discovery document can be viewed from its served location: - oc get --raw ''/.well-known/oauth-authorization-server'' For further - details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - If oauthMetadata.name is non-empty, this value has precedence over - any metadata reference stored in status. The key "oauthMetadata" - is used to locate the data. If specified and the config map or expected - key is not found, no metadata is served. If the specified metadata - is not valid, no metadata is served. The namespace for this config - map is openshift-config.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcProviders: - description: "OIDCProviders are OIDC identity providers that can issue - tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\". - \n At most one provider can be configured." - items: - properties: - claimMappings: - description: ClaimMappings describes rules on how to transform - information from an ID token into a cluster identity - properties: - groups: - description: Groups is a name of the claim that should be - used to construct groups for the cluster identity. The - referenced claim must use array of strings values. - properties: - claim: - description: Claim is a JWT token claim to be used in - the mapping - type: string - prefix: - description: "Prefix is a string to prefix the value - from the token in the result of the claim mapping. - \n By default, no prefixing occurs. \n Example: if - `prefix` is set to \"myoidc:\"\" and the `claim` in - JWT contains an array of strings \"a\", \"b\" and - \ \"c\", the mapping will result in an array of string - \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\"." - type: string - required: - - claim - type: object - username: - description: "Username is a name of the claim that should - be used to construct usernames for the cluster identity. - \n Default value: \"sub\"" - properties: - claim: - description: Claim is a JWT token claim to be used in - the mapping - type: string - prefix: - properties: - prefixString: - minLength: 1 - type: string - required: - - prefixString - type: object - prefixPolicy: - description: "PrefixPolicy specifies how a prefix should - apply. \n By default, claims other than `email` will - be prefixed with the issuer URL to prevent naming - clashes with other plugins. \n Set to \"NoPrefix\" - to disable prefixing. \n Example: (1) `prefix` is - set to \"myoidc:\" and `claim` is set to \"username\". - If the JWT claim `username` contains value `userA`, - the resulting mapped value will be \"myoidc:userA\". - (2) `prefix` is set to \"myoidc:\" and `claim` is - set to \"email\". If the JWT `email` claim contains - value \"userA@myoidc.tld\", the resulting mapped value - will be \"myoidc:userA@myoidc.tld\". 
(3) `prefix` - is unset, `issuerURL` is set to `https://myoidc.tld`, - the JWT claims include \"username\":\"userA\" and - \"email\":\"userA@myoidc.tld\", and `claim` is set - to: (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\" - (b) \"email\": the mapped value will be \"userA@myoidc.tld\"" - enum: - - "" - - NoPrefix - - Prefix - type: string - required: - - claim - type: object - x-kubernetes-validations: - - message: prefix must be set if prefixPolicy is 'Prefix', - but must remain unset otherwise - rule: 'has(self.prefixPolicy) && self.prefixPolicy == - ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) - > 0) : !has(self.prefix)' - type: object - claimValidationRules: - description: ClaimValidationRules are rules that are applied - to validate token claims to authenticate users. - items: - properties: - requiredClaim: - description: RequiredClaim allows configuring a required - claim name and its expected value - properties: - claim: - description: Claim is a name of a required claim. - Only claims with string values are supported. - minLength: 1 - type: string - requiredValue: - description: RequiredValue is the required value for - the claim. - minLength: 1 - type: string - required: - - claim - - requiredValue - type: object - type: - default: RequiredClaim - description: Type sets the type of the validation rule - enum: - - RequiredClaim - type: string - type: object - type: array - x-kubernetes-list-type: atomic - issuer: - description: Issuer describes atributes of the OIDC token issuer - properties: - audiences: - description: Audiences is an array of audiences that the - token was issued for. Valid tokens must include at least - one of these values in their "aud" claim. Must be set - to exactly one value. - items: - minLength: 1 - type: string - maxItems: 10 - minItems: 1 - type: array - x-kubernetes-list-type: set - issuerCertificateAuthority: - description: CertificateAuthority is a reference to a config - map in the configuration namespace. The .data of the configMap - must contain the "ca-bundle.crt" key. If unset, system - trust is used instead. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - issuerURL: - description: URL is the serving URL of the token issuer. - Must use the https:// scheme. 
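# --- Illustrative example (not part of this patch): an Authentication
# resource configured for an external OIDC issuer per the schema above.
# Issuer URL, audience, provider name and claim names are hypothetical
# placeholders. prefix.prefixString is required here because prefixPolicy
# is "Prefix" (the CEL validation above enforces that pairing).
apiVersion: config.openshift.io/v1
kind: Authentication
metadata:
  name: cluster
spec:
  type: OIDC
  oidcProviders:                          # at most one provider may be configured
  - name: myoidc
    issuer:
      issuerURL: https://myoidc.tld       # must use the https:// scheme
      audiences:
      - openshift-aud                     # valid tokens must carry this value in their "aud" claim
    claimMappings:
      username:
        claim: email
        prefixPolicy: Prefix              # "" | NoPrefix | Prefix
        prefix:
          prefixString: "myoidc:"
      groups:
        claim: groups
        prefix: "myoidc:"                 # groups become e.g. "myoidc:team-a"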
- pattern: ^https:\/\/[^\s] - type: string - required: - - audiences - - issuerURL - type: object - name: - description: Name of the OIDC provider - minLength: 1 - type: string - oidcClients: - description: OIDCClients contains configuration for the platform's - clients that need to request tokens from the issuer - items: - properties: - clientID: - description: ClientID is the identifier of the OIDC client - from the OIDC provider - minLength: 1 - type: string - clientSecret: - description: ClientSecret refers to a secret in the `openshift-config` - namespace that contains the client secret in the `clientSecret` - key of the `.data` field - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - componentName: - description: ComponentName is the name of the component - that is supposed to consume this client configuration - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: ComponentNamespace is the namespace of the - component that is supposed to consume this client configuration - maxLength: 63 - minLength: 1 - type: string - extraScopes: - description: ExtraScopes is an optional set of scopes - to request tokens with. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - clientID - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - required: - - issuer - - name - type: object - maxItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - serviceAccountIssuer: - description: 'serviceAccountIssuer is the identifier of the bound - service account token issuer. The default is https://kubernetes.default.svc - WARNING: Updating this field will not result in immediate invalidation - of all bound tokens with the previous issuer value. Instead, the - tokens issued by previous service account issuer will continue to - be trusted for a time period chosen by the platform (currently set - to 24h). This time period is subject to change over time. This allows - internal components to transition to use new service account issuer - without service distruption.' - type: string - type: - description: type identifies the cluster managed, user facing authentication - mode in use. Specifically, it manages the component that responds - to login attempts. The default is IntegratedOAuth. - enum: - - "" - - None - - IntegratedOAuth - - OIDC - type: string - webhookTokenAuthenticator: - description: "webhookTokenAuthenticator configures a remote token - reviewer. These remote authentication webhooks can be used to verify - bearer tokens via the tokenreviews.authentication.k8s.io REST API. - This is required to honor bearer tokens that are provisioned by - an external authentication service. \n Can only be set if \"Type\" - is set to \"None\"." - properties: - kubeConfig: - description: "kubeConfig references a secret that contains kube - config file data which describes how to access the remote webhook - service. The namespace for the referenced secret is openshift-config. - \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - \n The key \"kubeConfig\" is used to locate the data. If the - secret or expected key is not found, the webhook is not honored. 
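# --- Illustrative example (not part of this patch): a remote webhook token
# reviewer per the webhookTokenAuthenticator stanza above. Valid only when
# spec.type is "None"; the secret name is hypothetical and must exist in
# openshift-config with its kubeconfig data under the "kubeConfig" key.
apiVersion: config.openshift.io/v1
kind: Authentication
metadata:
  name: cluster
spec:
  type: None
  webhookTokenAuthenticator:
    kubeConfig:
      name: webhook-authenticator-kubeconfig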
- If the specified kube config data is not valid, the webhook - is not honored." - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - required: - - kubeConfig - type: object - webhookTokenAuthenticators: - description: webhookTokenAuthenticators is DEPRECATED, setting it - has no effect. - items: - description: deprecatedWebhookTokenAuthenticator holds the necessary - configuration options for a remote token authenticator. It's the - same as WebhookTokenAuthenticator but it's missing the 'required' - validation on KubeConfig field. - properties: - kubeConfig: - description: 'kubeConfig contains kube config file data which - describes how to access the remote webhook service. For further - details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - The key "kubeConfig" is used to locate the data. If the secret - or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook - is not honored. The namespace for this secret is determined - by the point of use.' - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - integratedOAuthMetadata: - description: 'integratedOAuthMetadata contains the discovery endpoint - data for OAuth 2.0 Authorization Server Metadata for the in-cluster - integrated OAuth server. This discovery document can be viewed from - its served location: oc get --raw ''/.well-known/oauth-authorization-server'' - For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - This contains the observed value based on cluster state. An explicitly - set value in spec.oauthMetadata has precedence over this field. - This field has no meaning if authentication spec.type is not set - to IntegratedOAuth. The key "oauthMetadata" is used to locate the - data. If the config map or expected key is not found, no metadata - is served. If the specified metadata is not valid, no metadata is - served. The namespace for this config map is openshift-config-managed.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcClients: - description: OIDCClients is where participating operators place the - current OIDC client status for OIDC clients that can be customized - by the cluster-admin. - items: - properties: - componentName: - description: ComponentName is the name of the component that - will consume a client configuration. - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: ComponentNamespace is the namespace of the component - that will consume a client configuration. - maxLength: 63 - minLength: 1 - type: string - conditions: - description: "Conditions are used to communicate the state of - the `oidcClients` entry. \n Supported conditions include Available, - Degraded and Progressing. \n If Available is true, the component - is successfully using the configured client. If Degraded is - true, that means something has gone wrong trying to handle - the client configuration. 
If Progressing is true, that means - the component is taking some action related to the `oidcClients` - entry." - items: - description: "Condition contains details for one aspect of - the current state of this API Resource. --- This struct - is intended for direct use as an array at the field path - .status.conditions. For example, \n type FooStatus struct{ - // Represents the observations of a foo's current state. - // Known .status.conditions.type are: \"Available\", \"Progressing\", - and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields - }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should - be when the underlying condition changed. If that is - not known, then using the time when the API field changed - is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, - if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the - current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier - indicating the reason for the condition's last transition. - Producers of specific condition types may define expected - values and meanings for this field, and whether the - values are considered a guaranteed API. The value should - be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across - resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability - to deconflict is important. The regex it matches is - (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - consumingUsers: - description: ConsumingUsers is a slice of ServiceAccounts that - need to have read permission on the `clientSecret` secret. - items: - description: ConsumingUser is an alias for string which we - add validation to. Currently only service accounts are supported. 
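# --- Illustrative example (not part of this patch): the shape of one
# status.oidcClients entry as a consuming operator might report it, per
# the status schema above. Every value is a hypothetical placeholder; the
# consumingUsers entry matches the system:serviceaccount:<ns>:<name>
# pattern required below.
status:
  oidcClients:
  - componentName: console
    componentNamespace: openshift-console
    conditions:
    - type: Available
      status: "True"
      reason: OIDCClientConfigured        # CamelCase programmatic identifier
      message: client configuration is in use
      lastTransitionTime: "2024-03-15T00:00:00Z"
      observedGeneration: 1
    consumingUsers:
    - system:serviceaccount:openshift-console:console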
- maxLength: 512 - minLength: 1 - pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - maxItems: 5 - type: array - x-kubernetes-list-type: set - currentOIDCClients: - description: CurrentOIDCClients is a list of clients that the - component is currently using. - items: - properties: - clientID: - description: ClientID is the identifier of the OIDC client - from the OIDC provider - minLength: 1 - type: string - issuerURL: - description: URL is the serving URL of the token issuer. - Must use the https:// scheme. - pattern: ^https:\/\/[^\s] - type: string - oidcProviderName: - description: OIDCName refers to the `name` of the provider - from `oidcProviders` - minLength: 1 - type: string - required: - - clientID - - issuerURL - - oidcProviderName - type: object - type: array - x-kubernetes-list-map-keys: - - issuerURL - - clientID - x-kubernetes-list-type: map - required: - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: all oidcClients in the oidcProviders must match their componentName - and componentNamespace to either a previously configured oidcClient or - they must exist in the status.oidcClients - rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) - || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace - == specC.componentNamespace && statusC.componentName == specC.componentName) - || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, - oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, - oldC.componentNamespace == specC.componentNamespace && oldC.componentName - == specC.componentName)))))' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default-Hypershift.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default-Hypershift.yaml deleted file mode 100644 index 2cabddacf..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default-Hypershift.yaml +++ /dev/null @@ -1,552 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - release.openshift.io/feature-set: Default - name: authentications.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Authentication - listKind: AuthenticationList - plural: authentications - singular: authentication - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Authentication specifies cluster-wide settings for authentication - (like OAuth and webhook token authenticators). The canonical name of an - instance is `cluster`. \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - oauthMetadata: - description: 'oauthMetadata contains the discovery endpoint data for - OAuth 2.0 Authorization Server Metadata for an external OAuth server. - This discovery document can be viewed from its served location: - oc get --raw ''/.well-known/oauth-authorization-server'' For further - details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - If oauthMetadata.name is non-empty, this value has precedence over - any metadata reference stored in status. The key "oauthMetadata" - is used to locate the data. If specified and the config map or expected - key is not found, no metadata is served. If the specified metadata - is not valid, no metadata is served. The namespace for this config - map is openshift-config.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcProviders: - description: "OIDCProviders are OIDC identity providers that can issue - tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\". - \n At most one provider can be configured." - items: - properties: - claimMappings: - description: ClaimMappings describes rules on how to transform - information from an ID token into a cluster identity - properties: - groups: - description: Groups is a name of the claim that should be - used to construct groups for the cluster identity. The - referenced claim must use array of strings values. - properties: - claim: - description: Claim is a JWT token claim to be used in - the mapping - type: string - prefix: - description: "Prefix is a string to prefix the value - from the token in the result of the claim mapping. - \n By default, no prefixing occurs. \n Example: if - `prefix` is set to \"myoidc:\"\" and the `claim` in - JWT contains an array of strings \"a\", \"b\" and - \ \"c\", the mapping will result in an array of string - \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\"." - type: string - required: - - claim - type: object - username: - description: "Username is a name of the claim that should - be used to construct usernames for the cluster identity. - \n Default value: \"sub\"" - properties: - claim: - description: Claim is a JWT token claim to be used in - the mapping - type: string - prefix: - properties: - prefixString: - minLength: 1 - type: string - required: - - prefixString - type: object - prefixPolicy: - description: "PrefixPolicy specifies how a prefix should - apply. \n By default, claims other than `email` will - be prefixed with the issuer URL to prevent naming - clashes with other plugins. \n Set to \"NoPrefix\" - to disable prefixing. \n Example: (1) `prefix` is - set to \"myoidc:\" and `claim` is set to \"username\". - If the JWT claim `username` contains value `userA`, - the resulting mapped value will be \"myoidc:userA\". - (2) `prefix` is set to \"myoidc:\" and `claim` is - set to \"email\". 
If the JWT `email` claim contains - value \"userA@myoidc.tld\", the resulting mapped value - will be \"myoidc:userA@myoidc.tld\". (3) `prefix` - is unset, `issuerURL` is set to `https://myoidc.tld`, - the JWT claims include \"username\":\"userA\" and - \"email\":\"userA@myoidc.tld\", and `claim` is set - to: (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\" - (b) \"email\": the mapped value will be \"userA@myoidc.tld\"" - enum: - - "" - - NoPrefix - - Prefix - type: string - required: - - claim - type: object - x-kubernetes-validations: - - message: prefix must be set if prefixPolicy is 'Prefix', - but must remain unset otherwise - rule: 'has(self.prefixPolicy) && self.prefixPolicy == - ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) - > 0) : !has(self.prefix)' - type: object - claimValidationRules: - description: ClaimValidationRules are rules that are applied - to validate token claims to authenticate users. - items: - properties: - requiredClaim: - description: RequiredClaim allows configuring a required - claim name and its expected value - properties: - claim: - description: Claim is a name of a required claim. - Only claims with string values are supported. - minLength: 1 - type: string - requiredValue: - description: RequiredValue is the required value for - the claim. - minLength: 1 - type: string - required: - - claim - - requiredValue - type: object - type: - default: RequiredClaim - description: Type sets the type of the validation rule - enum: - - RequiredClaim - type: string - type: object - type: array - x-kubernetes-list-type: atomic - issuer: - description: Issuer describes atributes of the OIDC token issuer - properties: - audiences: - description: Audiences is an array of audiences that the - token was issued for. Valid tokens must include at least - one of these values in their "aud" claim. Must be set - to exactly one value. - items: - minLength: 1 - type: string - maxItems: 10 - minItems: 1 - type: array - x-kubernetes-list-type: set - issuerCertificateAuthority: - description: CertificateAuthority is a reference to a config - map in the configuration namespace. The .data of the configMap - must contain the "ca-bundle.crt" key. If unset, system - trust is used instead. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - issuerURL: - description: URL is the serving URL of the token issuer. - Must use the https:// scheme. 
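
For orientation, the cluster-scoped Authentication object this schema validates is the singleton named cluster. A minimal sketch selecting an external OIDC issuer, assuming a hypothetical Keycloak endpoint and an invented audience and provider name:

    apiVersion: config.openshift.io/v1
    kind: Authentication
    metadata:
      name: cluster                 # canonical singleton name
    spec:
      type: OIDC                    # delegates authentication to the external issuer
      oidcProviders:
      - name: keycloak              # placeholder provider name
        issuer:
          issuerURL: https://keycloak.example.com/realms/ocp   # https:// is enforced by the pattern
          audiences:
          - openshift-aud           # tokens must carry this value in their "aud" claim
        claimMappings:
          username:
            claim: preferred_username   # "sub" is the documented default when omitted
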
- pattern: ^https:\/\/[^\s] - type: string - required: - - audiences - - issuerURL - type: object - name: - description: Name of the OIDC provider - minLength: 1 - type: string - oidcClients: - description: OIDCClients contains configuration for the platform's - clients that need to request tokens from the issuer - items: - properties: - clientID: - description: ClientID is the identifier of the OIDC client - from the OIDC provider - minLength: 1 - type: string - clientSecret: - description: ClientSecret refers to a secret in the `openshift-config` - namespace that contains the client secret in the `clientSecret` - key of the `.data` field - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - componentName: - description: ComponentName is the name of the component - that is supposed to consume this client configuration - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: ComponentNamespace is the namespace of the - component that is supposed to consume this client configuration - maxLength: 63 - minLength: 1 - type: string - extraScopes: - description: ExtraScopes is an optional set of scopes - to request tokens with. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - clientID - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - required: - - issuer - - name - type: object - maxItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - serviceAccountIssuer: - description: 'serviceAccountIssuer is the identifier of the bound - service account token issuer. The default is https://kubernetes.default.svc - WARNING: Updating this field will not result in immediate invalidation - of all bound tokens with the previous issuer value. Instead, the - tokens issued by previous service account issuer will continue to - be trusted for a time period chosen by the platform (currently set - to 24h). This time period is subject to change over time. This allows - internal components to transition to use new service account issuer - without service distruption.' - type: string - type: - description: type identifies the cluster managed, user facing authentication - mode in use. Specifically, it manages the component that responds - to login attempts. The default is IntegratedOAuth. - enum: - - "" - - None - - IntegratedOAuth - - OIDC - type: string - webhookTokenAuthenticator: - description: "webhookTokenAuthenticator configures a remote token - reviewer. These remote authentication webhooks can be used to verify - bearer tokens via the tokenreviews.authentication.k8s.io REST API. - This is required to honor bearer tokens that are provisioned by - an external authentication service. \n Can only be set if \"Type\" - is set to \"None\"." - properties: - kubeConfig: - description: "kubeConfig references a secret that contains kube - config file data which describes how to access the remote webhook - service. The namespace for the referenced secret is openshift-config. - \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - \n The key \"kubeConfig\" is used to locate the data. If the - secret or expected key is not found, the webhook is not honored. 
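
The webhook alternative described here pairs type None with a secret reference; the secret name below is an assumption, and the secret must live in openshift-config with its data under the "kubeConfig" key:

    apiVersion: config.openshift.io/v1
    kind: Authentication
    metadata:
      name: cluster
    spec:
      type: None                    # webhookTokenAuthenticator is only honored with type None
      webhookTokenAuthenticator:
        kubeConfig:
          name: token-webhook-kubeconfig   # hypothetical secret name
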
- If the specified kube config data is not valid, the webhook - is not honored." - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - required: - - kubeConfig - type: object - webhookTokenAuthenticators: - description: webhookTokenAuthenticators is DEPRECATED, setting it - has no effect. - items: - description: deprecatedWebhookTokenAuthenticator holds the necessary - configuration options for a remote token authenticator. It's the - same as WebhookTokenAuthenticator but it's missing the 'required' - validation on KubeConfig field. - properties: - kubeConfig: - description: 'kubeConfig contains kube config file data which - describes how to access the remote webhook service. For further - details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - The key "kubeConfig" is used to locate the data. If the secret - or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook - is not honored. The namespace for this secret is determined - by the point of use.' - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - integratedOAuthMetadata: - description: 'integratedOAuthMetadata contains the discovery endpoint - data for OAuth 2.0 Authorization Server Metadata for the in-cluster - integrated OAuth server. This discovery document can be viewed from - its served location: oc get --raw ''/.well-known/oauth-authorization-server'' - For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - This contains the observed value based on cluster state. An explicitly - set value in spec.oauthMetadata has precedence over this field. - This field has no meaning if authentication spec.type is not set - to IntegratedOAuth. The key "oauthMetadata" is used to locate the - data. If the config map or expected key is not found, no metadata - is served. If the specified metadata is not valid, no metadata is - served. The namespace for this config map is openshift-config-managed.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcClients: - description: OIDCClients is where participating operators place the - current OIDC client status for OIDC clients that can be customized - by the cluster-admin. - items: - properties: - componentName: - description: ComponentName is the name of the component that - will consume a client configuration. - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: ComponentNamespace is the namespace of the component - that will consume a client configuration. - maxLength: 63 - minLength: 1 - type: string - conditions: - description: "Conditions are used to communicate the state of - the `oidcClients` entry. \n Supported conditions include Available, - Degraded and Progressing. \n If Available is true, the component - is successfully using the configured client. If Degraded is - true, that means something has gone wrong trying to handle - the client configuration. 
If Progressing is true, that means - the component is taking some action related to the `oidcClients` - entry." - items: - description: "Condition contains details for one aspect of - the current state of this API Resource. --- This struct - is intended for direct use as an array at the field path - .status.conditions. For example, \n type FooStatus struct{ - // Represents the observations of a foo's current state. - // Known .status.conditions.type are: \"Available\", \"Progressing\", - and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields - }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should - be when the underlying condition changed. If that is - not known, then using the time when the API field changed - is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, - if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the - current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier - indicating the reason for the condition's last transition. - Producers of specific condition types may define expected - values and meanings for this field, and whether the - values are considered a guaranteed API. The value should - be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across - resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability - to deconflict is important. The regex it matches is - (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - consumingUsers: - description: ConsumingUsers is a slice of ServiceAccounts that - need to have read permission on the `clientSecret` secret. - items: - description: ConsumingUser is an alias for string which we - add validation to. Currently only service accounts are supported. 
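
A status entry written by a participating operator might take the following shape; the component, client, and issuer values are invented, and each consuming user must match the system:serviceaccount pattern enforced below. The CRD-level CEL rule at the end of this schema then only admits a spec-level oidcClient whose componentNamespace/componentName pair already appears here (or appeared in the previous spec):

    status:
      oidcClients:
      - componentName: console                # illustrative component
        componentNamespace: openshift-console
        conditions:
        - type: Available
          status: "True"
          reason: OIDCClientConfigured        # CamelCase, per the reason pattern
          message: client is in use
          lastTransitionTime: "2024-03-15T00:00:00Z"
        consumingUsers:
        - system:serviceaccount:openshift-console:console
        currentOIDCClients:
        - clientID: console-client            # invented client ID
          issuerURL: https://keycloak.example.com/realms/ocp
          oidcProviderName: keycloak
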
- maxLength: 512 - minLength: 1 - pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - maxItems: 5 - type: array - x-kubernetes-list-type: set - currentOIDCClients: - description: CurrentOIDCClients is a list of clients that the - component is currently using. - items: - properties: - clientID: - description: ClientID is the identifier of the OIDC client - from the OIDC provider - minLength: 1 - type: string - issuerURL: - description: URL is the serving URL of the token issuer. - Must use the https:// scheme. - pattern: ^https:\/\/[^\s] - type: string - oidcProviderName: - description: OIDCName refers to the `name` of the provider - from `oidcProviders` - minLength: 1 - type: string - required: - - clientID - - issuerURL - - oidcProviderName - type: object - type: array - x-kubernetes-list-map-keys: - - issuerURL - - clientID - x-kubernetes-list-type: map - required: - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: all oidcClients in the oidcProviders must match their componentName - and componentNamespace to either a previously configured oidcClient or - they must exist in the status.oidcClients - rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) - || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace - == specC.componentNamespace && statusC.componentName == specC.componentName) - || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, - oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, - oldC.componentNamespace == specC.componentNamespace && oldC.componentName - == specC.componentName)))))' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default-Hypershift.yaml-patch b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default-Hypershift.yaml-patch deleted file mode 100644 index dcc254fbd..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default-Hypershift.yaml-patch +++ /dev/null @@ -1,285 +0,0 @@ -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/oidcProviders - value: - description: "OIDCProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\". \n At most one provider can be configured." - type: array - maxItems: 1 - items: - type: object - required: - - issuer - - name - properties: - claimMappings: - description: ClaimMappings describes rules on how to transform information from an ID token into a cluster identity - type: object - properties: - groups: - description: Groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values. - type: object - required: - - claim - properties: - claim: - description: Claim is a JWT token claim to be used in the mapping - type: string - prefix: - description: "Prefix is a string to prefix the value from the token in the result of the claim mapping. \n By default, no prefixing occurs. 
\n Example: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\"." - type: string - username: - description: "Username is a name of the claim that should be used to construct usernames for the cluster identity. \n Default value: \"sub\"" - type: object - required: - - claim - properties: - claim: - description: Claim is a JWT token claim to be used in the mapping - type: string - prefix: - type: object - required: - - prefixString - properties: - prefixString: - type: string - minLength: 1 - prefixPolicy: - description: "PrefixPolicy specifies how a prefix should apply. \n By default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins. \n Set to \"NoPrefix\" to disable prefixing. \n Example: (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\". If the JWT claim `username` contains value `userA`, the resulting mapped value will be \"myoidc:userA\". (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the JWT `email` claim contains value \"userA@myoidc.tld\", the resulting mapped value will be \"myoidc:userA@myoidc.tld\". (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\", and `claim` is set to: (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\" (b) \"email\": the mapped value will be \"userA@myoidc.tld\"" - type: string - enum: - - "" - - NoPrefix - - Prefix - x-kubernetes-validations: - - rule: 'has(self.prefixPolicy) && self.prefixPolicy == ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)' - message: prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise - claimValidationRules: - description: ClaimValidationRules are rules that are applied to validate token claims to authenticate users. - type: array - items: - type: object - properties: - requiredClaim: - description: RequiredClaim allows configuring a required claim name and its expected value - type: object - required: - - claim - - requiredValue - properties: - claim: - description: Claim is a name of a required claim. Only claims with string values are supported. - type: string - minLength: 1 - requiredValue: - description: RequiredValue is the required value for the claim. - type: string - minLength: 1 - type: - description: Type sets the type of the validation rule - type: string - default: RequiredClaim - enum: - - RequiredClaim - x-kubernetes-list-type: atomic - issuer: - description: Issuer describes atributes of the OIDC token issuer - type: object - required: - - audiences - - issuerURL - properties: - audiences: - description: Audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their "aud" claim. Must be set to exactly one value. - type: array - maxItems: 10 - minItems: 1 - items: - type: string - minLength: 1 - x-kubernetes-list-type: set - issuerCertificateAuthority: - description: CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the "ca-bundle.crt" key. If unset, system trust is used instead. 
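
Trusting a private CA for the issuer is a single additional reference; the config map name is an assumption, and its .data must carry the bundle under the "ca-bundle.crt" key:

    spec:
      oidcProviders:
      - name: keycloak
        issuer:
          issuerURL: https://keycloak.example.com/realms/ocp
          audiences:
          - openshift-aud
          issuerCertificateAuthority:
            name: keycloak-ca       # hypothetical config map in the configuration namespace
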
- type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced config map - type: string - issuerURL: - description: URL is the serving URL of the token issuer. Must use the https:// scheme. - type: string - pattern: ^https:\/\/[^\s] - name: - description: Name of the OIDC provider - type: string - minLength: 1 - oidcClients: - description: OIDCClients contains configuration for the platform's clients that need to request tokens from the issuer - type: array - maxItems: 20 - items: - type: object - required: - - clientID - - componentName - - componentNamespace - properties: - clientID: - description: ClientID is the identifier of the OIDC client from the OIDC provider - type: string - minLength: 1 - clientSecret: - description: ClientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field - type: object - required: - - name - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - componentName: - description: ComponentName is the name of the component that is supposed to consume this client configuration - type: string - maxLength: 256 - minLength: 1 - componentNamespace: - description: ComponentNamespace is the namespace of the component that is supposed to consume this client configuration - type: string - maxLength: 63 - minLength: 1 - extraScopes: - description: ExtraScopes is an optional set of scopes to request tokens with. - type: array - items: - type: string - x-kubernetes-list-type: set - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/status/properties/oidcClients - value: - description: OIDCClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin. - items: - properties: - componentName: - description: ComponentName is the name of the component that will consume a client configuration. - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: ComponentNamespace is the namespace of the component that will consume a client configuration. - maxLength: 63 - minLength: 1 - type: string - conditions: - description: "Conditions are used to communicate the state of the `oidcClients` entry. \n Supported conditions include Available, Degraded and Progressing. \n If Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. If Progressing is true, that means the component is taking some action related to the `oidcClients` entry." - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - consumingUsers: - description: ConsumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret. - items: - description: ConsumingUser is an alias for string which we add validation to. Currently only service accounts are supported. - maxLength: 512 - minLength: 1 - pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - maxItems: 5 - type: array - x-kubernetes-list-type: set - currentOIDCClients: - description: CurrentOIDCClients is a list of clients that the component is currently using. - items: - properties: - clientID: - description: ClientID is the identifier of the OIDC client from the OIDC provider - minLength: 1 - type: string - issuerURL: - description: URL is the serving URL of the token issuer. Must use the https:// scheme. 
- pattern: ^https:\/\/[^\s] - type: string - oidcProviderName: - description: OIDCName refers to the `name` of the provider from `oidcProviders` - minLength: 1 - type: string - required: - - clientID - - issuerURL - - oidcProviderName - type: object - type: array - x-kubernetes-list-map-keys: - - issuerURL - - clientID - x-kubernetes-list-type: map - required: - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/x-kubernetes-validations - value: - - message: all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients - rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))' -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/type/enum - value: - - "" - - None - - IntegratedOAuth - - OIDC diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default.yaml deleted file mode 100644 index 87e2434db..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-Default.yaml +++ /dev/null @@ -1,171 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: Default - name: authentications.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Authentication - listKind: AuthenticationList - plural: authentications - singular: authentication - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Authentication specifies cluster-wide settings for authentication - (like OAuth and webhook token authenticators). The canonical name of an - instance is `cluster`. \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - oauthMetadata: - description: 'oauthMetadata contains the discovery endpoint data for - OAuth 2.0 Authorization Server Metadata for an external OAuth server. - This discovery document can be viewed from its served location: - oc get --raw ''/.well-known/oauth-authorization-server'' For further - details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - If oauthMetadata.name is non-empty, this value has precedence over - any metadata reference stored in status. The key "oauthMetadata" - is used to locate the data. If specified and the config map or expected - key is not found, no metadata is served. If the specified metadata - is not valid, no metadata is served. The namespace for this config - map is openshift-config.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - serviceAccountIssuer: - description: 'serviceAccountIssuer is the identifier of the bound - service account token issuer. The default is https://kubernetes.default.svc - WARNING: Updating this field will not result in immediate invalidation - of all bound tokens with the previous issuer value. Instead, the - tokens issued by previous service account issuer will continue to - be trusted for a time period chosen by the platform (currently set - to 24h). This time period is subject to change over time. This allows - internal components to transition to use new service account issuer - without service distruption.' - type: string - type: - description: type identifies the cluster managed, user facing authentication - mode in use. Specifically, it manages the component that responds - to login attempts. The default is IntegratedOAuth. - enum: - - "" - - None - - IntegratedOAuth - type: string - webhookTokenAuthenticator: - description: "webhookTokenAuthenticator configures a remote token - reviewer. These remote authentication webhooks can be used to verify - bearer tokens via the tokenreviews.authentication.k8s.io REST API. - This is required to honor bearer tokens that are provisioned by - an external authentication service. \n Can only be set if \"Type\" - is set to \"None\"." - properties: - kubeConfig: - description: "kubeConfig references a secret that contains kube - config file data which describes how to access the remote webhook - service. The namespace for the referenced secret is openshift-config. - \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - \n The key \"kubeConfig\" is used to locate the data. If the - secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook - is not honored." - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - required: - - kubeConfig - type: object - webhookTokenAuthenticators: - description: webhookTokenAuthenticators is DEPRECATED, setting it - has no effect. - items: - description: deprecatedWebhookTokenAuthenticator holds the necessary - configuration options for a remote token authenticator. 
It's the - same as WebhookTokenAuthenticator but it's missing the 'required' - validation on KubeConfig field. - properties: - kubeConfig: - description: 'kubeConfig contains kube config file data which - describes how to access the remote webhook service. For further - details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - The key "kubeConfig" is used to locate the data. If the secret - or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook - is not honored. The namespace for this secret is determined - by the point of use.' - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - integratedOAuthMetadata: - description: 'integratedOAuthMetadata contains the discovery endpoint - data for OAuth 2.0 Authorization Server Metadata for the in-cluster - integrated OAuth server. This discovery document can be viewed from - its served location: oc get --raw ''/.well-known/oauth-authorization-server'' - For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - This contains the observed value based on cluster state. An explicitly - set value in spec.oauthMetadata has precedence over this field. - This field has no meaning if authentication spec.type is not set - to IntegratedOAuth. The key "oauthMetadata" is used to locate the - data. If the config map or expected key is not found, no metadata - is served. If the specified metadata is not valid, no metadata is - served. The namespace for this config map is openshift-config-managed.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml deleted file mode 100644 index cf6913a8f..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml +++ /dev/null @@ -1,555 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - formatted: "true" - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: authentications.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Authentication - listKind: AuthenticationList - plural: authentications - singular: authentication - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Authentication specifies cluster-wide settings for authentication - (like OAuth and webhook token authenticators). The canonical name of an - instance is `cluster`. 
\n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - oauthMetadata: - description: 'oauthMetadata contains the discovery endpoint data for - OAuth 2.0 Authorization Server Metadata for an external OAuth server. - This discovery document can be viewed from its served location: - oc get --raw ''/.well-known/oauth-authorization-server'' For further - details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - If oauthMetadata.name is non-empty, this value has precedence over - any metadata reference stored in status. The key "oauthMetadata" - is used to locate the data. If specified and the config map or expected - key is not found, no metadata is served. If the specified metadata - is not valid, no metadata is served. The namespace for this config - map is openshift-config.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcProviders: - description: "OIDCProviders are OIDC identity providers that can issue - tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\". - \n At most one provider can be configured." - items: - properties: - claimMappings: - description: ClaimMappings describes rules on how to transform - information from an ID token into a cluster identity - properties: - groups: - description: Groups is a name of the claim that should be - used to construct groups for the cluster identity. The - referenced claim must use array of strings values. - properties: - claim: - description: Claim is a JWT token claim to be used in - the mapping - type: string - prefix: - description: "Prefix is a string to prefix the value - from the token in the result of the claim mapping. - \n By default, no prefixing occurs. \n Example: if - `prefix` is set to \"myoidc:\"\" and the `claim` in - JWT contains an array of strings \"a\", \"b\" and - \ \"c\", the mapping will result in an array of string - \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\"." - type: string - required: - - claim - type: object - username: - description: "Username is a name of the claim that should - be used to construct usernames for the cluster identity. - \n Default value: \"sub\"" - properties: - claim: - description: Claim is a JWT token claim to be used in - the mapping - type: string - prefix: - properties: - prefixString: - minLength: 1 - type: string - required: - - prefixString - type: object - prefixPolicy: - description: "PrefixPolicy specifies how a prefix should - apply. \n By default, claims other than `email` will - be prefixed with the issuer URL to prevent naming - clashes with other plugins. 
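
Pinning the prefixing behavior explicitly, instead of relying on the issuer-URL default, is a sketch along these lines; "myoidc:" simply mirrors the example used in the field documentation:

    claimMappings:
      username:
        claim: username
        prefixPolicy: Prefix        # the CEL rule requires prefix to be set for Prefix
        prefix:
          prefixString: "myoidc:"   # userA then maps to "myoidc:userA"
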
\n Set to \"NoPrefix\" - to disable prefixing. \n Example: (1) `prefix` is - set to \"myoidc:\" and `claim` is set to \"username\". - If the JWT claim `username` contains value `userA`, - the resulting mapped value will be \"myoidc:userA\". - (2) `prefix` is set to \"myoidc:\" and `claim` is - set to \"email\". If the JWT `email` claim contains - value \"userA@myoidc.tld\", the resulting mapped value - will be \"myoidc:userA@myoidc.tld\". (3) `prefix` - is unset, `issuerURL` is set to `https://myoidc.tld`, - the JWT claims include \"username\":\"userA\" and - \"email\":\"userA@myoidc.tld\", and `claim` is set - to: (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\" - (b) \"email\": the mapped value will be \"userA@myoidc.tld\"" - enum: - - "" - - NoPrefix - - Prefix - type: string - required: - - claim - type: object - x-kubernetes-validations: - - message: prefix must be set if prefixPolicy is 'Prefix', - but must remain unset otherwise - rule: 'has(self.prefixPolicy) && self.prefixPolicy == - ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) - > 0) : !has(self.prefix)' - type: object - claimValidationRules: - description: ClaimValidationRules are rules that are applied - to validate token claims to authenticate users. - items: - properties: - requiredClaim: - description: RequiredClaim allows configuring a required - claim name and its expected value - properties: - claim: - description: Claim is a name of a required claim. - Only claims with string values are supported. - minLength: 1 - type: string - requiredValue: - description: RequiredValue is the required value for - the claim. - minLength: 1 - type: string - required: - - claim - - requiredValue - type: object - type: - default: RequiredClaim - description: Type sets the type of the validation rule - enum: - - RequiredClaim - type: string - type: object - type: array - x-kubernetes-list-type: atomic - issuer: - description: Issuer describes atributes of the OIDC token issuer - properties: - audiences: - description: Audiences is an array of audiences that the - token was issued for. Valid tokens must include at least - one of these values in their "aud" claim. Must be set - to exactly one value. - items: - minLength: 1 - type: string - maxItems: 10 - minItems: 1 - type: array - x-kubernetes-list-type: set - issuerCertificateAuthority: - description: CertificateAuthority is a reference to a config - map in the configuration namespace. The .data of the configMap - must contain the "ca-bundle.crt" key. If unset, system - trust is used instead. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - issuerURL: - description: URL is the serving URL of the token issuer. - Must use the https:// scheme. 
- pattern: ^https:\/\/[^\s] - type: string - required: - - audiences - - issuerURL - type: object - name: - description: Name of the OIDC provider - minLength: 1 - type: string - oidcClients: - description: OIDCClients contains configuration for the platform's - clients that need to request tokens from the issuer - items: - properties: - clientID: - description: ClientID is the identifier of the OIDC client - from the OIDC provider - minLength: 1 - type: string - clientSecret: - description: ClientSecret refers to a secret in the `openshift-config` - namespace that contains the client secret in the `clientSecret` - key of the `.data` field - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - componentName: - description: ComponentName is the name of the component - that is supposed to consume this client configuration - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: ComponentNamespace is the namespace of the - component that is supposed to consume this client configuration - maxLength: 63 - minLength: 1 - type: string - extraScopes: - description: ExtraScopes is an optional set of scopes - to request tokens with. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - clientID - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - required: - - issuer - - name - type: object - maxItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - serviceAccountIssuer: - description: 'serviceAccountIssuer is the identifier of the bound - service account token issuer. The default is https://kubernetes.default.svc - WARNING: Updating this field will not result in immediate invalidation - of all bound tokens with the previous issuer value. Instead, the - tokens issued by previous service account issuer will continue to - be trusted for a time period chosen by the platform (currently set - to 24h). This time period is subject to change over time. This allows - internal components to transition to use new service account issuer - without service distruption.' - type: string - type: - description: type identifies the cluster managed, user facing authentication - mode in use. Specifically, it manages the component that responds - to login attempts. The default is IntegratedOAuth. - enum: - - "" - - None - - IntegratedOAuth - - OIDC - type: string - webhookTokenAuthenticator: - description: "webhookTokenAuthenticator configures a remote token - reviewer. These remote authentication webhooks can be used to verify - bearer tokens via the tokenreviews.authentication.k8s.io REST API. - This is required to honor bearer tokens that are provisioned by - an external authentication service. \n Can only be set if \"Type\" - is set to \"None\"." - properties: - kubeConfig: - description: "kubeConfig references a secret that contains kube - config file data which describes how to access the remote webhook - service. The namespace for the referenced secret is openshift-config. - \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - \n The key \"kubeConfig\" is used to locate the data. If the - secret or expected key is not found, the webhook is not honored. 
- If the specified kube config data is not valid, the webhook - is not honored." - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - required: - - kubeConfig - type: object - webhookTokenAuthenticators: - description: webhookTokenAuthenticators is DEPRECATED, setting it - has no effect. - items: - description: deprecatedWebhookTokenAuthenticator holds the necessary - configuration options for a remote token authenticator. It's the - same as WebhookTokenAuthenticator but it's missing the 'required' - validation on KubeConfig field. - properties: - kubeConfig: - description: 'kubeConfig contains kube config file data which - describes how to access the remote webhook service. For further - details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - The key "kubeConfig" is used to locate the data. If the secret - or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook - is not honored. The namespace for this secret is determined - by the point of use.' - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - integratedOAuthMetadata: - description: 'integratedOAuthMetadata contains the discovery endpoint - data for OAuth 2.0 Authorization Server Metadata for the in-cluster - integrated OAuth server. This discovery document can be viewed from - its served location: oc get --raw ''/.well-known/oauth-authorization-server'' - For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - This contains the observed value based on cluster state. An explicitly - set value in spec.oauthMetadata has precedence over this field. - This field has no meaning if authentication spec.type is not set - to IntegratedOAuth. The key "oauthMetadata" is used to locate the - data. If the config map or expected key is not found, no metadata - is served. If the specified metadata is not valid, no metadata is - served. The namespace for this config map is openshift-config-managed.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcClients: - description: OIDCClients is where participating operators place the - current OIDC client status for OIDC clients that can be customized - by the cluster-admin. - items: - properties: - componentName: - description: ComponentName is the name of the component that - will consume a client configuration. - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: ComponentNamespace is the namespace of the component - that will consume a client configuration. - maxLength: 63 - minLength: 1 - type: string - conditions: - description: "Conditions are used to communicate the state of - the `oidcClients` entry. \n Supported conditions include Available, - Degraded and Progressing. \n If Available is true, the component - is successfully using the configured client. If Degraded is - true, that means something has gone wrong trying to handle - the client configuration. 
If Progressing is true, that means - the component is taking some action related to the `oidcClients` - entry." - items: - description: "Condition contains details for one aspect of - the current state of this API Resource. --- This struct - is intended for direct use as an array at the field path - .status.conditions. For example, \n type FooStatus struct{ - // Represents the observations of a foo's current state. - // Known .status.conditions.type are: \"Available\", \"Progressing\", - and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields - }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should - be when the underlying condition changed. If that is - not known, then using the time when the API field changed - is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, - if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the - current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier - indicating the reason for the condition's last transition. - Producers of specific condition types may define expected - values and meanings for this field, and whether the - values are considered a guaranteed API. The value should - be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across - resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability - to deconflict is important. The regex it matches is - (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - consumingUsers: - description: ConsumingUsers is a slice of ServiceAccounts that - need to have read permission on the `clientSecret` secret. - items: - description: ConsumingUser is an alias for string which we - add validation to. Currently only service accounts are supported. 
- maxLength: 512 - minLength: 1 - pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - maxItems: 5 - type: array - x-kubernetes-list-type: set - currentOIDCClients: - description: CurrentOIDCClients is a list of clients that the - component is currently using. - items: - properties: - clientID: - description: ClientID is the identifier of the OIDC client - from the OIDC provider - minLength: 1 - type: string - issuerURL: - description: URL is the serving URL of the token issuer. - Must use the https:// scheme. - pattern: ^https:\/\/[^\s] - type: string - oidcProviderName: - description: OIDCName refers to the `name` of the provider - from `oidcProviders` - minLength: 1 - type: string - required: - - clientID - - issuerURL - - oidcProviderName - type: object - type: array - x-kubernetes-list-map-keys: - - issuerURL - - clientID - x-kubernetes-list-type: map - required: - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: all oidcClients in the oidcProviders must match their componentName - and componentNamespace to either a previously configured oidcClient or - they must exist in the status.oidcClients - rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) - || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace - == specC.componentNamespace && statusC.componentName == specC.componentName) - || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, - oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, - oldC.componentNamespace == specC.componentNamespace && oldC.componentName - == specC.componentName)))))' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml deleted file mode 100644 index ce7f789da..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml +++ /dev/null @@ -1,75 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: consoles.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Console - listKind: ConsoleList - plural: consoles - singular: console - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Console holds cluster-wide configuration for the web console, - including the logout URL, and reports the public URL of the console. The - canonical name is `cluster`. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - authentication: - description: ConsoleAuthentication defines a list of optional configuration - for console authentication. - properties: - logoutRedirect: - description: 'An optional, absolute URL to redirect web browsers - to after logging out of the console. If not specified, it will - redirect to the default login page. This is required when using - an identity provider that supports single sign-on (SSO) such - as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, - SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console - will destroy the user''s token. The logoutRedirect provides - the user the option to perform single logout (SLO) through the - identity provider to destroy their single sign-on session.' - pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$ - type: string - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - consoleURL: - description: The URL for the console. This will be derived from the - host for the route that is created for the console. - type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-CustomNoUpgrade.crd.yaml deleted file mode 100644 index 7b1bee406..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,159 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: dnses.config.openshift.io -spec: - group: config.openshift.io - names: - kind: DNS - listKind: DNSList - plural: dnses - singular: dns - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "DNS holds cluster-wide information about DNS. The canonical - name is `cluster` \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. 
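
The Console configuration removed just above is far simpler; a sketch with an assumed single-logout endpoint (the value must satisfy the https pattern on logoutRedirect):

    apiVersion: config.openshift.io/v1
    kind: Console
    metadata:
      name: cluster
    spec:
      authentication:
        logoutRedirect: https://sso.example.com/logout   # invented SSO logout URL
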
Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - baseDomain: - description: "baseDomain is the base domain of the cluster. All managed - DNS records will be sub-domains of this base. \n For example, given - the base domain `openshift.example.com`, an API server DNS record - may be created for `cluster-api.openshift.example.com`. \n Once - set, this field cannot be changed." - type: string - platform: - description: platform holds configuration specific to the underlying - infrastructure provider for DNS. When omitted, this means the user - has no opinion and the platform is left to choose reasonable defaults. - These defaults are subject to change over time. - properties: - aws: - description: aws contains DNS configuration specific to the Amazon - Web Services cloud provider. - properties: - privateZoneIAMRole: - description: privateZoneIAMRole contains the ARN of an IAM - role that should be assumed when performing operations on - the cluster's private hosted zone specified in the cluster - DNS config. When left empty, no role should be assumed. - pattern: ^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$ - type: string - type: object - type: - description: "type is the underlying infrastructure provider for - the cluster. Allowed values: \"\", \"AWS\". \n Individual components - may not support all platforms, and must handle unrecognized - platforms with best-effort defaults." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - x-kubernetes-validations: - - message: allowed values are '' and 'AWS' - rule: self in ['','AWS'] - required: - - type - type: object - x-kubernetes-validations: - - message: aws configuration is required when platform is AWS, and - forbidden otherwise - rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) : - !has(self.aws)' - privateZone: - description: "privateZone is the location where all the DNS records - that are only available internally to the cluster exist. \n If this - field is nil, no private records should be created. \n Once set, - this field cannot be changed." - properties: - id: - description: "id is the identifier that can be used to find the - DNS hosted zone. \n on AWS zone can be fetched using `ID` as - id in [1] on Azure zone can be fetched using `ID` as a pre-determined - name in [2], on GCP zone can be fetched using `ID` as a pre-determined - name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options - [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show - [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" - type: string - tags: - additionalProperties: - type: string - description: "tags can be used to query the DNS hosted zone. 
\n - on AWS, resourcegroupstaggingapi [1] can be used to fetch a - zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" - type: object - type: object - publicZone: - description: "publicZone is the location where all the DNS records - that are publicly accessible to the internet exist. \n If this field - is nil, no public records should be created. \n Once set, this field - cannot be changed." - properties: - id: - description: "id is the identifier that can be used to find the - DNS hosted zone. \n on AWS zone can be fetched using `ID` as - id in [1] on Azure zone can be fetched using `ID` as a pre-determined - name in [2], on GCP zone can be fetched using `ID` as a pre-determined - name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options - [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show - [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" - type: string - tags: - additionalProperties: - type: string - description: "tags can be used to query the DNS hosted zone. \n - on AWS, resourcegroupstaggingapi [1] can be used to fetch a - zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" - type: object - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-Default.crd.yaml deleted file mode 100644 index d2a3e7dc4..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-Default.crd.yaml +++ /dev/null @@ -1,159 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: Default - name: dnses.config.openshift.io -spec: - group: config.openshift.io - names: - kind: DNS - listKind: DNSList - plural: dnses - singular: dns - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "DNS holds cluster-wide information about DNS. The canonical - name is `cluster` \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - baseDomain: - description: "baseDomain is the base domain of the cluster. All managed - DNS records will be sub-domains of this base. \n For example, given - the base domain `openshift.example.com`, an API server DNS record - may be created for `cluster-api.openshift.example.com`. \n Once - set, this field cannot be changed." - type: string - platform: - description: platform holds configuration specific to the underlying - infrastructure provider for DNS. When omitted, this means the user - has no opinion and the platform is left to choose reasonable defaults. - These defaults are subject to change over time. - properties: - aws: - description: aws contains DNS configuration specific to the Amazon - Web Services cloud provider. - properties: - privateZoneIAMRole: - description: privateZoneIAMRole contains the ARN of an IAM - role that should be assumed when performing operations on - the cluster's private hosted zone specified in the cluster - DNS config. When left empty, no role should be assumed. - pattern: ^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$ - type: string - type: object - type: - description: "type is the underlying infrastructure provider for - the cluster. Allowed values: \"\", \"AWS\". \n Individual components - may not support all platforms, and must handle unrecognized - platforms with best-effort defaults." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - x-kubernetes-validations: - - message: allowed values are '' and 'AWS' - rule: self in ['','AWS'] - required: - - type - type: object - x-kubernetes-validations: - - message: aws configuration is required when platform is AWS, and - forbidden otherwise - rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) : - !has(self.aws)' - privateZone: - description: "privateZone is the location where all the DNS records - that are only available internally to the cluster exist. \n If this - field is nil, no private records should be created. \n Once set, - this field cannot be changed." - properties: - id: - description: "id is the identifier that can be used to find the - DNS hosted zone. \n on AWS zone can be fetched using `ID` as - id in [1] on Azure zone can be fetched using `ID` as a pre-determined - name in [2], on GCP zone can be fetched using `ID` as a pre-determined - name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options - [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show - [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" - type: string - tags: - additionalProperties: - type: string - description: "tags can be used to query the DNS hosted zone. \n - on AWS, resourcegroupstaggingapi [1] can be used to fetch a - zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" - type: object - type: object - publicZone: - description: "publicZone is the location where all the DNS records - that are publicly accessible to the internet exist. 
\n If this field - is nil, no public records should be created. \n Once set, this field - cannot be changed." - properties: - id: - description: "id is the identifier that can be used to find the - DNS hosted zone. \n on AWS zone can be fetched using `ID` as - id in [1] on Azure zone can be fetched using `ID` as a pre-determined - name in [2], on GCP zone can be fetched using `ID` as a pre-determined - name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options - [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show - [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" - type: string - tags: - additionalProperties: - type: string - description: "tags can be used to query the DNS hosted zone. \n - on AWS, resourcegroupstaggingapi [1] can be used to fetch a - zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" - type: object - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index b5fe24073..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,159 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: dnses.config.openshift.io -spec: - group: config.openshift.io - names: - kind: DNS - listKind: DNSList - plural: dnses - singular: dns - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "DNS holds cluster-wide information about DNS. The canonical - name is `cluster` \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - baseDomain: - description: "baseDomain is the base domain of the cluster. All managed - DNS records will be sub-domains of this base. 
\n For example, given - the base domain `openshift.example.com`, an API server DNS record - may be created for `cluster-api.openshift.example.com`. \n Once - set, this field cannot be changed." - type: string - platform: - description: platform holds configuration specific to the underlying - infrastructure provider for DNS. When omitted, this means the user - has no opinion and the platform is left to choose reasonable defaults. - These defaults are subject to change over time. - properties: - aws: - description: aws contains DNS configuration specific to the Amazon - Web Services cloud provider. - properties: - privateZoneIAMRole: - description: privateZoneIAMRole contains the ARN of an IAM - role that should be assumed when performing operations on - the cluster's private hosted zone specified in the cluster - DNS config. When left empty, no role should be assumed. - pattern: ^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$ - type: string - type: object - type: - description: "type is the underlying infrastructure provider for - the cluster. Allowed values: \"\", \"AWS\". \n Individual components - may not support all platforms, and must handle unrecognized - platforms with best-effort defaults." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - x-kubernetes-validations: - - message: allowed values are '' and 'AWS' - rule: self in ['','AWS'] - required: - - type - type: object - x-kubernetes-validations: - - message: aws configuration is required when platform is AWS, and - forbidden otherwise - rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) : - !has(self.aws)' - privateZone: - description: "privateZone is the location where all the DNS records - that are only available internally to the cluster exist. \n If this - field is nil, no private records should be created. \n Once set, - this field cannot be changed." - properties: - id: - description: "id is the identifier that can be used to find the - DNS hosted zone. \n on AWS zone can be fetched using `ID` as - id in [1] on Azure zone can be fetched using `ID` as a pre-determined - name in [2], on GCP zone can be fetched using `ID` as a pre-determined - name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options - [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show - [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" - type: string - tags: - additionalProperties: - type: string - description: "tags can be used to query the DNS hosted zone. \n - on AWS, resourcegroupstaggingapi [1] can be used to fetch a - zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" - type: object - type: object - publicZone: - description: "publicZone is the location where all the DNS records - that are publicly accessible to the internet exist. \n If this field - is nil, no public records should be created. \n Once set, this field - cannot be changed." - properties: - id: - description: "id is the identifier that can be used to find the - DNS hosted zone. \n on AWS zone can be fetched using `ID` as - id in [1] on Azure zone can be fetched using `ID` as a pre-determined - name in [2], on GCP zone can be fetched using `ID` as a pre-determined - name in [3]. 
\n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options - [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show - [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" - type: string - tags: - additionalProperties: - type: string - description: "tags can be used to query the DNS hosted zone. \n - on AWS, resourcegroupstaggingapi [1] can be used to fetch a - zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" - type: object - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml deleted file mode 100644 index 159260e60..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml +++ /dev/null @@ -1,213 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: featuregates.config.openshift.io -spec: - group: config.openshift.io - names: - kind: FeatureGate - listKind: FeatureGateList - plural: featuregates - singular: featuregate - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Feature holds cluster-wide information about feature gates. - \ The canonical name is `cluster` \n Compatibility level 1: Stable within - a major release for a minimum of 12 months or 3 minor releases (whichever - is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - customNoUpgrade: - description: customNoUpgrade allows the enabling or disabling of any - feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE - UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting - cannot be validated. If you have any typos or accidentally apply - invalid combinations your cluster may fail in an unrecoverable way. featureSet - must equal "CustomNoUpgrade" must be set to use this field. 
- nullable: true - properties: - disabled: - description: disabled is a list of all feature gates that you - want to force off - items: - description: FeatureGateName is a string to enforce patterns - on the name of a FeatureGate - pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ - type: string - type: array - enabled: - description: enabled is a list of all feature gates that you want - to force on - items: - description: FeatureGateName is a string to enforce patterns - on the name of a FeatureGate - pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ - type: string - type: array - type: object - featureSet: - description: featureSet changes the list of features in the cluster. The - default is empty. Be very careful adjusting this setting. Turning - on or off features may cause irreversible changes in your cluster - which cannot be undone. - type: string - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - conditions: - description: 'conditions represent the observations of the current - state. Known .status.conditions.type are: "DeterminationDegraded"' - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - featureGates: - description: featureGates contains a list of enabled and disabled - featureGates that are keyed by payloadVersion. Operators other than - the CVO and cluster-config-operator, must read the .status.featureGates, - locate the version they are managing, find the enabled/disabled - featuregates and make the operand and operator match. The enabled/disabled - values for a particular version may change during the life of the - cluster as various .spec.featureSet values are selected. Operators - may choose to restart their processes to pick up these changes, - but remembering past enable/disable lists is beyond the scope of - this API and is the responsibility of individual operators. Only - featureGates with .version in the ClusterVersion.status will be - present in this list. - items: - properties: - disabled: - description: disabled is a list of all feature gates that are - disabled in the cluster for the named version. - items: - properties: - name: - description: name is the name of the FeatureGate. - pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ - type: string - required: - - name - type: object - type: array - enabled: - description: enabled is a list of all feature gates that are - enabled in the cluster for the named version. - items: - properties: - name: - description: name is the name of the FeatureGate. - pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ - type: string - required: - - name - type: object - type: array - version: - description: version matches the version provided by the ClusterVersion - and in the ClusterOperator.Status.Versions field. - type: string - required: - - version - type: object - type: array - x-kubernetes-list-map-keys: - - version - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml deleted file mode 100644 index f53396aec..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml +++ /dev/null @@ -1,162 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: images.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Image - listKind: ImageList - plural: images - singular: image - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Image governs policies related to imagestream imports and runtime - configuration for external registries. It allows cluster admins to configure - which registries OpenShift is allowed to import images from, extra CA trust - bundles for external registries, and policies to block or allow registry - hostnames. 
When exposing OpenShift's image registry to the public, this - also lets cluster admins specify the external hostname. \n Compatibility - level 1: Stable within a major release for a minimum of 12 months or 3 minor - releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - additionalTrustedCA: - description: additionalTrustedCA is a reference to a ConfigMap containing - additional CAs that should be trusted during imagestream import, - pod image pull, build image pull, and imageregistry pullthrough. - The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - allowedRegistriesForImport: - description: allowedRegistriesForImport limits the container image - registries that normal users may import images from. Set this list - to the registries that you trust to contain valid Docker images - and that you want applications to be able to import from. Users - with permission to create Images or ImageStreamMappings via the - API are not affected by this policy - typically only administrators - or system integrations will have those permissions. - items: - description: RegistryLocation contains a location of the registry - specified by the registry domain name. The domain name might include - wildcards, like '*' or '??'. - properties: - domainName: - description: domainName specifies a domain name for the registry - In case the registry use non-standard (80 or 443) port, the - port should be included in the domain name as well. - type: string - insecure: - description: insecure indicates whether the registry is secure - (https) or insecure (http) By default (if not specified) the - registry is assumed as secure. - type: boolean - type: object - type: array - externalRegistryHostnames: - description: externalRegistryHostnames provides the hostnames for - the default external image registry. The external hostname should - be set only when the image registry is exposed externally. The first - value is used in 'publicDockerImageRepository' field in ImageStreams. - The value must be in "hostname[:port]" format. - items: - type: string - type: array - registrySources: - description: registrySources contains configuration that determines - how the container runtime should treat individual registries when - accessing images for builds+pods. (e.g. whether or not to allow - insecure access). It does not contain configuration for the internal - cluster registry. - properties: - allowedRegistries: - description: "allowedRegistries are the only registries permitted - for image pull and push actions. All other registries are denied. - \n Only one of BlockedRegistries or AllowedRegistries may be - set." 
- items: - type: string - type: array - blockedRegistries: - description: "blockedRegistries cannot be used for image pull - and push actions. All other registries are permitted. \n Only - one of BlockedRegistries or AllowedRegistries may be set." - items: - type: string - type: array - containerRuntimeSearchRegistries: - description: 'containerRuntimeSearchRegistries are registries - that will be searched when pulling images that do not have fully - qualified domains in their pull specs. Registries will be searched - in the order provided in the list. Note: this search list only - works with the container runtime, i.e CRI-O. Will NOT work with - builds or imagestream imports.' - format: hostname - items: - type: string - minItems: 1 - type: array - x-kubernetes-list-type: set - insecureRegistries: - description: insecureRegistries are registries which do not have - a valid TLS certificates or only support HTTP connections. - items: - type: string - type: array - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - externalRegistryHostnames: - description: externalRegistryHostnames provides the hostnames for - the default external image registry. The external hostname should - be set only when the image registry is exposed externally. The first - value is used in 'publicDockerImageRepository' field in ImageStreams. - The value must be in "hostname[:port]" format. - items: - type: string - type: array - internalRegistryHostname: - description: internalRegistryHostname sets the hostname for the default - internal image registry. The value must be in "hostname[:port]" - format. This value is set by the image registry operator which controls - the internal registry hostname. - type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml deleted file mode 100644 index 2e30bc552..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml +++ /dev/null @@ -1,112 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/874 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: imagecontentpolicies.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ImageContentPolicy - listKind: ImageContentPolicyList - plural: imagecontentpolicies - singular: imagecontentpolicy - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "ImageContentPolicy holds cluster-wide information about how - to handle registry mirror rules. When multiple policies are defined, the - outcome of the behavior is defined on each field. \n Compatibility level - 1: Stable within a major release for a minimum of 12 months or 3 minor releases - (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - repositoryDigestMirrors: - description: "repositoryDigestMirrors allows images referenced by - image digests in pods to be pulled from alternative mirrored repository - locations. The image pull specification provided to the pod will - be compared to the source locations described in RepositoryDigestMirrors - and the image may be pulled down from any of the mirrors in the - list instead of the specified repository allowing administrators - to choose a potentially faster mirror. To pull images from mirrors - by tags, set the \"allowMirrorByTags\". \n Each “source” - repository is treated independently; configurations for different - “source” repositories don’t interact. \n If the \"mirrors\" is not - specified, the image will continue to be pulled from the specified - repository in the pull spec. \n When multiple policies are defined - for the same “source” repository, the sets of defined mirrors will - be merged together, preserving the relative order of the mirrors, - if possible. For example, if policy A has mirrors `a, b, c` and - policy B has mirrors `c, d, e`, the mirrors will be used in the - order `a, b, c, d, e`. If the orders of mirror entries conflict - (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the - resulting order is unspecified." - items: - description: RepositoryDigestMirrors holds cluster-wide information - about how to handle mirrors in the registries config. - properties: - allowMirrorByTags: - description: allowMirrorByTags if true, the mirrors can be used - to pull the images that are referenced by their tags. Default - is false, the mirrors only work when pulling the images that - are referenced by their digests. Pulling images by tag can - potentially yield different images, depending on which endpoint - we pull from. Forcing digest-pulls for mirrors avoids that - issue. - type: boolean - mirrors: - description: mirrors is zero or more repositories that may also - contain the same images. If the "mirrors" is not specified, - the image will continue to be pulled from the specified repository - in the pull spec. No mirror will be configured. The order - of mirrors in this list is treated as the user's desired priority, - while source is by default considered lower priority than - all mirrors. Other cluster configuration, including (but not - limited to) other repositoryDigestMirrors objects, may impact - the exact order mirrors are contacted in, or some mirrors - may be contacted in parallel, so this should be considered - a preference rather than a guarantee of ordering. - items: - pattern: ^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$ - type: string - type: array - x-kubernetes-list-type: set - source: - description: source is the repository that users refer to, e.g. - in image pull specifications.
- pattern: ^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$ - type: string - required: - - source - type: object - type: array - x-kubernetes-list-map-keys: - - source - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagedigestmirrorset.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagedigestmirrorset.crd.yaml deleted file mode 100644 index 422e46d43..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagedigestmirrorset.crd.yaml +++ /dev/null @@ -1,141 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1126 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: imagedigestmirrorsets.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ImageDigestMirrorSet - listKind: ImageDigestMirrorSetList - plural: imagedigestmirrorsets - shortNames: - - idms - singular: imagedigestmirrorset - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "ImageDigestMirrorSet holds cluster-wide information about how - to handle registry mirror rules on using digest pull specification. When - multiple policies are defined, the outcome of the behavior is defined on - each field. \n Compatibility level 1: Stable within a major release for - a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - imageDigestMirrors: - description: "imageDigestMirrors allows images referenced by image - digests in pods to be pulled from alternative mirrored repository - locations. The image pull specification provided to the pod will - be compared to the source locations described in imageDigestMirrors - and the image may be pulled down from any of the mirrors in the - list instead of the specified repository allowing administrators - to choose a potentially faster mirror. To use mirrors to pull images - using tag specification, users should configure a list of mirrors - using \"ImageTagMirrorSet\" CRD. \n If the image pull specification - matches the repository of \"source\" in multiple imagedigestmirrorset - objects, only the objects which define the most specific namespace - match will be used. 
For example, if there are objects using quay.io/libpod - and quay.io/libpod/busybox as the \"source\", only the objects using - quay.io/libpod/busybox are going to apply for pull specification - quay.io/libpod/busybox. Each “source” repository is treated independently; - configurations for different “source” repositories don’t interact. - \n If the \"mirrors\" is not specified, the image will continue - to be pulled from the specified repository in the pull spec. \n - When multiple policies are defined for the same “source” repository, - the sets of defined mirrors will be merged together, preserving - the relative order of the mirrors, if possible. For example, if - policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, - the mirrors will be used in the order `a, b, c, d, e`. If the orders - of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration - is not rejected but the resulting order is unspecified. Users who - want to use a specific order of mirrors should configure them into - one list of mirrors using the expected order." - items: - description: ImageDigestMirrors holds cluster-wide information about - how to handle mirrors in the registries config. - properties: - mirrorSourcePolicy: - description: mirrorSourcePolicy defines the fallback policy - if pulling the image from the mirrors fails. If unset, the image - will continue to be pulled from the repository in the - pull spec. sourcePolicy is valid configuration only when one - or more mirrors are in the mirror list. - enum: - - NeverContactSource - - AllowContactingSource - type: string - mirrors: - description: 'mirrors is zero or more locations that may also - contain the same images. No mirror will be configured if not - specified. Images can be pulled from these mirrors only if - they are referenced by their digests. The mirrored location - is obtained by replacing the part of the input reference that - matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo - reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat - causes a mirror.local/redhat/product/repo repository to be - used. The order of mirrors in this list is treated as the - user''s desired priority, while source is by default considered - lower priority than all mirrors. If no mirror is specified - or all image pulls from the mirror list fail, the image will - continue to be pulled from the repository in the pull spec - unless explicitly prohibited by "mirrorSourcePolicy". Other - cluster configuration, including (but not limited to) other - imageDigestMirrors objects, may impact the exact order mirrors - are contacted in, or some mirrors may be contacted in parallel, - so this should be considered a preference rather than a guarantee - of ordering. "mirrors" uses one of the following formats: - host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo - for more information about the format, see the document about - the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table' - items: - pattern: ^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$ - type: string - type: array - x-kubernetes-list-type: set - source: - description: 'source matches the repository that users refer - to, e.g. in image pull specifications. Setting source to a - registry hostname e.g. docker.io,
quay.io, or registry.redhat.io, - will match the image pull specification of corresponding - registry. "source" uses one of the following formats: host[:port] - host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo - [*.]host for more information about the format, see the document - about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table' - pattern: ^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$ - type: string - required: - - source - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status contains the observed state of the resource. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagetagmirrorset.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagetagmirrorset.crd.yaml deleted file mode 100644 index abcab0166..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagetagmirrorset.crd.yaml +++ /dev/null @@ -1,144 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1126 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: imagetagmirrorsets.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ImageTagMirrorSet - listKind: ImageTagMirrorSetList - plural: imagetagmirrorsets - shortNames: - - itms - singular: imagetagmirrorset - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "ImageTagMirrorSet holds cluster-wide information about how to - handle registry mirror rules on using tag pull specification. When multiple - policies are defined, the outcome of the behavior is defined on each field. - \n Compatibility level 1: Stable within a major release for a minimum of - 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - imageTagMirrors: - description: "imageTagMirrors allows images referenced by image tags - in pods to be pulled from alternative mirrored repository locations.
- The image pull specification provided to the pod will be compared - to the source locations described in imageTagMirrors and the image - may be pulled down from any of the mirrors in the list instead of - the specified repository allowing administrators to choose a potentially - faster mirror. To use mirrors to pull images using digest specification - only, users should configure a list of mirrors using \"ImageDigestMirrorSet\" - CRD. \n If the image pull specification matches the repository of - \"source\" in multiple imagetagmirrorset objects, only the objects - which define the most specific namespace match will be used. For - example, if there are objects using quay.io/libpod and quay.io/libpod/busybox - as the \"source\", only the objects using quay.io/libpod/busybox - are going to apply for pull specification quay.io/libpod/busybox. - Each “source” repository is treated independently; configurations - for different “source” repositories don’t interact. \n If the \"mirrors\" - is not specified, the image will continue to be pulled from the - specified repository in the pull spec. \n When multiple policies - are defined for the same “source” repository, the sets of defined - mirrors will be merged together, preserving the relative order of - the mirrors, if possible. For example, if policy A has mirrors `a, - b, c` and policy B has mirrors `c, d, e`, the mirrors will be used - in the order `a, b, c, d, e`. If the orders of mirror entries conflict - (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the - resulting order is unspecified. Users who want to use a deterministic - order of mirrors should configure them into one list of mirrors - using the expected order." - items: - description: ImageTagMirrors holds cluster-wide information about - how to handle mirrors in the registries config. - properties: - mirrorSourcePolicy: - description: mirrorSourcePolicy defines the fallback policy - if pulling the image from the mirrors fails. If unset, the image - will continue to be pulled from the repository in the pull - spec. sourcePolicy is valid configuration only when one or - more mirrors are in the mirror list. - enum: - - NeverContactSource - - AllowContactingSource - type: string - mirrors: - description: 'mirrors is zero or more locations that may also - contain the same images. No mirror will be configured if not - specified. Images can be pulled from these mirrors only if - they are referenced by their tags. The mirrored location is - obtained by replacing the part of the input reference that - matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo - reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat - causes a mirror.local/redhat/product/repo repository to be - used. Pulling images by tag can potentially yield different - images, depending on which endpoint we pull from. Configuring - a list of mirrors using "ImageDigestMirrorSet" CRD and forcing - digest-pulls for mirrors avoids that issue. The order of mirrors - in this list is treated as the user''s desired priority, while - source is by default considered lower priority than all mirrors. - If no mirror is specified or all image pulls from the mirror - list fail, the image will continue to be pulled from the repository - in the pull spec unless explicitly prohibited by "mirrorSourcePolicy".
- Other cluster configuration, including (but not - limited to) other imageTagMirrors objects, may impact the exact order - mirrors are contacted in, or some mirrors may be contacted - in parallel, so this should be considered a preference rather - than a guarantee of ordering. "mirrors" uses one of the following - formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo - for more information about the format, see the document about - the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table' - items: - pattern: ^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$ - type: string - type: array - x-kubernetes-list-type: set - source: - description: 'source matches the repository that users refer - to, e.g. in image pull specifications. Setting source to a - registry hostname e.g. docker.io, quay.io, or registry.redhat.io, - will match the image pull specification of corresponding - registry. "source" uses one of the following formats: host[:port] - host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo - [*.]host for more information about the format, see the document - about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table' - pattern: ^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$ - type: string - required: - - source - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status contains the observed state of the resource. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml deleted file mode 100644 index 975def7c1..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,2089 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: infrastructures.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Infrastructure - listKind: InfrastructureList - plural: infrastructures - singular: infrastructure - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Infrastructure holds cluster-wide information about Infrastructure. - \ The canonical name is `cluster` \n Compatibility level 1: Stable within - a major release for a minimum of 12 months or 3 minor releases (whichever - is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object.
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - cloudConfig: - description: "cloudConfig is a reference to a ConfigMap containing - the cloud provider configuration file. This configuration file is - used to configure the Kubernetes cloud provider integration when - using the built-in cloud provider integration or the external cloud - controller manager. The namespace for this config map is openshift-config. - \n cloudConfig should only be consumed by the kube_cloud_config - controller. The controller is responsible for using the user configuration - in the spec for various platforms and combining that with the user - provided ConfigMap in this field to create a stitched kube cloud - config. The controller generates a ConfigMap `kube-cloud-config` - in `openshift-config-managed` namespace with the kube cloud config - is stored in `cloud.conf` key. All the clients are expected to use - the generated ConfigMap only." - properties: - key: - description: Key allows pointing to a specific key/value inside - of the configmap. This is useful for logical file references. - type: string - name: - type: string - type: object - platformSpec: - description: platformSpec holds desired information specific to the - underlying infrastructure provider. - properties: - alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba - Cloud infrastructure provider. - type: object - aws: - description: AWS contains settings specific to the Amazon Web - Services infrastructure provider. - properties: - serviceEndpoints: - description: serviceEndpoints list contains custom endpoints - which will override default service endpoint of AWS Services. - There must be only one ServiceEndpoint for a service. - items: - description: AWSServiceEndpoint store the configuration - of a custom url to override existing defaults of AWS Services. - properties: - name: - description: name is the name of the AWS service. The - list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html - This must be provided and cannot be empty. - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - pattern: ^https:// - type: string - type: object - type: array - x-kubernetes-list-type: atomic - type: object - azure: - description: Azure contains settings specific to the Azure infrastructure - provider. - type: object - baremetal: - description: BareMetal contains settings specific to the BareMetal - platform. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. 
These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - equinixMetal: - description: EquinixMetal contains settings specific to the Equinix - Metal infrastructure provider. - type: object - external: - description: ExternalPlatformType represents generic infrastructure - provider. 
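To make the dual-stack semantics above concrete, a hypothetical BareMetal platformSpec could set one IPv4 and one IPv6 entry per list; all addresses below are made up:

apiVersion: config.openshift.io/v1
kind: Infrastructure
metadata:
  name: cluster
spec:
  platformSpec:
    type: BareMetal
    baremetal:
      apiServerInternalIPs:        # at most one IPv4 and one IPv6 address
      - 192.168.10.5
      - fd2e:6f44:5dd8::5
      ingressIPs:
      - 192.168.10.10
      - fd2e:6f44:5dd8::a
      machineNetworks:             # CIDRs connecting all cluster nodes
      - 192.168.10.0/24
      - fd2e:6f44:5dd8::/64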
Platform-specific components should be supplemented - separately. - properties: - platformName: - default: Unknown - description: PlatformName holds the arbitrary string representing - the infrastructure provider name, expected to be set at - the installation time. This field is solely for informational - and reporting purposes and is not expected to be used for - decision-making. - type: string - x-kubernetes-validations: - - message: platform name cannot be changed once set - rule: oldSelf == 'Unknown' || self == oldSelf - type: object - gcp: - description: GCP contains settings specific to the Google Cloud - Platform infrastructure provider. - type: object - ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud - infrastructure provider. - type: object - kubevirt: - description: Kubevirt contains settings specific to the kubevirt - infrastructure provider. - type: object - nutanix: - description: Nutanix contains settings specific to the Nutanix - infrastructure provider. - properties: - failureDomains: - description: failureDomains configures failure domains information - for the Nutanix platform. When set, the failure domains - defined here may be used to spread Machines across prism - element clusters to improve fault tolerance of the cluster. - items: - description: NutanixFailureDomain configures failure domain - information for the Nutanix platform. - properties: - cluster: - description: cluster is to identify the cluster (the - Prism Element under management of the Prism Central), - in which the Machine's VM will be created. The cluster - identifier (uuid or name) can be obtained from the - Prism Central console or using the prism_central API. - properties: - name: - description: name is the resource name in the PC. - It cannot be empty if the type is Name. - type: string - type: - description: type is the identifier type to use - for this resource. - enum: - - UUID - - Name - type: string - uuid: - description: uuid is the UUID of the resource in - the PC. It cannot be empty if the type is UUID. - type: string - required: - - type - type: object - x-kubernetes-validations: - - message: uuid configuration is required when type - is UUID, and forbidden otherwise - rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) - : !has(self.uuid)' - - message: name configuration is required when type - is Name, and forbidden otherwise - rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) - : !has(self.name)' - name: - description: name defines the unique name of a failure - domain. Name is required and must be at most 64 characters - in length. It must consist of only lower case alphanumeric - characters and hyphens (-). It must start and end - with an alphanumeric character. This value is arbitrary - and is used to identify the failure domain within - the platform. - maxLength: 64 - minLength: 1 - pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' - type: string - subnets: - description: subnets holds a list of identifiers (one - or more) of the cluster's network subnets for the - Machine's VM to connect to. The subnet identifiers - (uuid or name) can be obtained from the Prism Central - console or using the prism_central API. - items: - description: NutanixResourceIdentifier holds the identity - of a Nutanix PC resource (cluster, image, subnet, - etc.) - properties: - name: - description: name is the resource name in the - PC. It cannot be empty if the type is Name. 
- type: string - type: - description: type is the identifier type to use - for this resource. - enum: - - UUID - - Name - type: string - uuid: - description: uuid is the UUID of the resource - in the PC. It cannot be empty if the type is - UUID. - type: string - required: - - type - type: object - x-kubernetes-validations: - - message: uuid configuration is required when type - is UUID, and forbidden otherwise - rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) - : !has(self.uuid)' - - message: name configuration is required when type - is Name, and forbidden otherwise - rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) - : !has(self.name)' - maxItems: 1 - minItems: 1 - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - required: - - cluster - - name - - subnets - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - prismCentral: - description: prismCentral holds the endpoint address and port - to access the Nutanix Prism Central. When a cluster-wide - proxy is installed, by default, this endpoint will be accessed - via the proxy. Should you wish for communication with this - endpoint not to be proxied, please add the endpoint to the - proxy spec.noProxy list. - properties: - address: - description: address is the endpoint address (DNS name - or IP address) of the Nutanix Prism Central or Element - (cluster) - maxLength: 256 - type: string - port: - description: port is the port number to access the Nutanix - Prism Central or Element (cluster) - format: int32 - maximum: 65535 - minimum: 1 - type: integer - required: - - address - - port - type: object - prismElements: - description: prismElements holds one or more endpoint address - and port data to access the Nutanix Prism Elements (clusters) - of the Nutanix Prism Central. Currently we only support - one Prism Element (cluster) for an OpenShift cluster, where - all the Nutanix resources (VMs, subnets, volumes, etc.) - used in the OpenShift cluster are located. In the future, - we may support Nutanix resources (VMs, etc.) spread over - multiple Prism Elements (clusters) of the Prism Central. - items: - description: NutanixPrismElementEndpoint holds the name - and endpoint data for a Prism Element (cluster) - properties: - endpoint: - description: endpoint holds the endpoint address and - port data of the Prism Element (cluster). When a cluster-wide - proxy is installed, by default, this endpoint will - be accessed via the proxy. Should you wish for communication - with this endpoint not to be proxied, please add the - endpoint to the proxy spec.noProxy list. - properties: - address: - description: address is the endpoint address (DNS - name or IP address) of the Nutanix Prism Central - or Element (cluster) - maxLength: 256 - type: string - port: - description: port is the port number to access the - Nutanix Prism Central or Element (cluster) - format: int32 - maximum: 65535 - minimum: 1 - type: integer - required: - - address - - port - type: object - name: - description: name is the name of the Prism Element (cluster). - This value will correspond with the cluster field - configured on other resources (eg Machines, PVCs, - etc). 
- maxLength: 256 - type: string - required: - - endpoint - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - required: - - prismCentral - - prismElements - type: object - openstack: - description: OpenStack contains settings specific to the OpenStack - infrastructure provider. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
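Pulling the Nutanix fields together, a sketch of a platformSpec with one Prism Element and one failure domain; every name and endpoint here is hypothetical:

spec:
  platformSpec:
    type: Nutanix
    nutanix:
      prismCentral:
        address: prismcentral.example.com
        port: 9440
      prismElements:
      - name: pe-cluster-1
        endpoint:
          address: pe1.example.com
          port: 9440
      failureDomains:
      - name: fd-1
        cluster:                 # identified by Name here; UUID is the alternative
          type: Name
          name: pe-cluster-1
        subnets:                 # exactly one subnet is currently allowed
        - type: Name
          name: workload-subnet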
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure - provider. 
- type: object - powervs: - description: PowerVS contains settings specific to the IBM Power - Systems Virtual Servers infrastructure provider. - properties: - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of a Power - VS service. - items: - description: PowervsServiceEndpoint stores the configuration - of a custom url to override existing defaults of PowerVS - Services. - properties: - name: - description: name is the name of the Power VS service. - Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api - ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller - Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - format: uri - pattern: ^https:// - type: string - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - type: object - type: - description: type is the underlying infrastructure provider for - the cluster. This value controls whether infrastructure automation - such as service load balancers, dynamic volume provisioning, - machine creation and deletion, and other integrations are enabled. - If None, no infrastructure automation is enabled. Allowed values - are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", - "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", - "Nutanix" and "None". Individual components may not support - all platforms, and must handle unrecognized platforms as None - if they do not support that platform. - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - vsphere: - description: VSphere contains settings specific to the VSphere - infrastructure provider. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - failureDomains: - description: failureDomains contains the definition of region, - zone and the vCenter topology. If this is omitted failure - domains (regions and zones) will not be used. - items: - description: VSpherePlatformFailureDomainSpec holds the - region and zone failure domain and the vCenter topology - of that failure domain. - properties: - name: - description: name defines the arbitrary but unique name - of a failure domain. - maxLength: 256 - minLength: 1 - type: string - region: - description: region defines the name of a region tag - that will be attached to a vCenter datacenter. The - tag category in vCenter must be named openshift-region. - maxLength: 80 - minLength: 1 - type: string - server: - anyOf: - - format: ipv4 - - format: ipv6 - - format: hostname - description: server is the fully-qualified domain name - or the IP address of the vCenter server. --- - maxLength: 255 - minLength: 1 - type: string - topology: - description: Topology describes a given failure domain - using vSphere constructs - properties: - computeCluster: - description: computeCluster the absolute path of - the vCenter cluster in which virtual machine will - be located. The absolute path is of the form //host/. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/host/.*? - type: string - datacenter: - description: datacenter is the name of vCenter datacenter - in which virtual machines will be located. The - maximum length of the datacenter name is 80 characters. - maxLength: 80 - type: string - datastore: - description: datastore is the absolute path of the - datastore in which the virtual machine is located. - The absolute path is of the form //datastore/ - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/datastore/.*? - type: string - folder: - description: folder is the absolute path of the - folder where virtual machines are located. The - absolute path is of the form //vm/. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/vm/.*? 
- type: string - networks: - description: networks is the list of port group - network names within this failure domain. Currently, - we only support a single interface per RHCOS virtual - machine. The available networks (port groups) - can be listed using `govc ls 'network/*'` The - single interface should be the absolute path of - the form //network/. - items: - type: string - maxItems: 1 - minItems: 1 - type: array - x-kubernetes-list-type: atomic - resourcePool: - description: resourcePool is the absolute path of - the resource pool where virtual machines will - be created. The absolute path is of the form //host//Resources/. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/host/.*?/Resources.* - type: string - template: - description: "template is the full inventory path - of the virtual machine or template that will be - cloned when creating new machines in this failure - domain. The maximum length of the path is 2048 - characters. \n When omitted, the template will - be calculated by the control plane machineset - operator based on the region and zone defined - in VSpherePlatformFailureDomainSpec. For example, - for zone=zonea, region=region1, and infrastructure - name=test, the template path would be calculated - as //vm/test-rhcos-region1-zonea." - maxLength: 2048 - minLength: 1 - pattern: ^/.*?/vm/.*? - type: string - required: - - computeCluster - - datacenter - - datastore - - networks - type: object - zone: - description: zone defines the name of a zone tag that - will be attached to a vCenter cluster. The tag category - in vCenter must be named openshift-zone. - maxLength: 80 - minLength: 1 - type: string - required: - - name - - region - - server - - topology - - zone - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
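A hedged sketch of one vSphere failure domain following the topology constraints above; the inventory paths and tag names are invented, and the network entry follows the absolute-path form the description suggests:

spec:
  platformSpec:
    type: VSphere
    vsphere:
      failureDomains:
      - name: us-east-1a
        region: us-east              # vCenter datacenter tag, category openshift-region
        zone: us-east-1a             # vCenter cluster tag, category openshift-zone
        server: vcenter.example.com
        topology:
          datacenter: dc1
          computeCluster: /dc1/host/cluster1   # must match ^/.*?/host/.*?
          datastore: /dc1/datastore/ds1        # must match ^/.*?/datastore/.*?
          networks:
          - /dc1/network/VM Network            # single port group per RHCOS VM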
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeNetworking: - description: nodeNetworking contains the definition of internal - and external network constraints for assigning the node's - networking. If this field is omitted, networking defaults - to the legacy address selection behavior which is to only - support a single address and return the first one found. - properties: - external: - description: external represents the network configuration - of the node that is externally routable. 
- properties: - excludeNetworkSubnetCidr: - description: excludeNetworkSubnetCidr IP addresses - in subnet ranges will be excluded when selecting - the IP address from the VirtualMachine's VM for - use in the status.addresses fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: atomic - network: - description: network VirtualMachine's VM Network names - that will be used to when searching for status.addresses - fields. Note that if internal.networkSubnetCIDR - and external.networkSubnetCIDR are not set, then - the vNIC associated to this network must only have - a single IP address assigned to it. The available - networks (port groups) can be listed using `govc - ls 'network/*'` - type: string - networkSubnetCidr: - description: networkSubnetCidr IP address on VirtualMachine's - network interfaces included in the fields' CIDRs - that will be used in respective status.addresses - fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: set - type: object - internal: - description: internal represents the network configuration - of the node that is routable only within the cluster. - properties: - excludeNetworkSubnetCidr: - description: excludeNetworkSubnetCidr IP addresses - in subnet ranges will be excluded when selecting - the IP address from the VirtualMachine's VM for - use in the status.addresses fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: atomic - network: - description: network VirtualMachine's VM Network names - that will be used to when searching for status.addresses - fields. Note that if internal.networkSubnetCIDR - and external.networkSubnetCIDR are not set, then - the vNIC associated to this network must only have - a single IP address assigned to it. The available - networks (port groups) can be listed using `govc - ls 'network/*'` - type: string - networkSubnetCidr: - description: networkSubnetCidr IP address on VirtualMachine's - network interfaces included in the fields' CIDRs - that will be used in respective status.addresses - fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - vcenters: - description: vcenters holds the connection details for services - to communicate with vCenter. Currently, only a single vCenter - is supported. --- - items: - description: VSpherePlatformVCenterSpec stores the vCenter - connection fields. This is used by the vSphere CCM. - properties: - datacenters: - description: The vCenter Datacenters in which the RHCOS - vm guests are located. This field will be used by - the Cloud Controller Manager. Each datacenter listed - here should be used within a topology. - items: - type: string - minItems: 1 - type: array - x-kubernetes-list-type: set - port: - description: port is the TCP port that will be used - to communicate to the vCenter endpoint. When omitted, - this means the user has no opinion and it is up to - the platform to choose a sensible default, which is - subject to change over time. - format: int32 - maximum: 32767 - minimum: 1 - type: integer - server: - anyOf: - - format: ipv4 - - format: ipv6 - - format: hostname - description: server is the fully-qualified domain name - or the IP address of the vCenter server. 
--- - maxLength: 255 - type: string - required: - - datacenters - - server - type: object - maxItems: 1 - minItems: 0 - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - apiServerInternalURI: - description: apiServerInternalURL is a valid URI with scheme 'https', - address and optionally a port (defaulting to 443). apiServerInternalURL - can be used by components like kubelets, to contact the Kubernetes - API server using the infrastructure provider rather than Kubernetes - networking. - type: string - apiServerURL: - description: apiServerURL is a valid URI with scheme 'https', address - and optionally a port (defaulting to 443). apiServerURL can be - used by components like the web console to tell users where to find - the Kubernetes API. - type: string - controlPlaneTopology: - default: HighlyAvailable - description: controlPlaneTopology expresses the expectations for operands - that normally run on control nodes. The default is 'HighlyAvailable', - which represents the behavior operators have in a "normal" cluster. - The 'SingleReplica' mode will be used in single-node deployments - and the operators should not configure the operand for highly-available - operation The 'External' mode indicates that the control plane is - hosted externally to the cluster and that its components are not - visible within the cluster. - enum: - - HighlyAvailable - - SingleReplica - - External - type: string - cpuPartitioning: - default: None - description: cpuPartitioning expresses if CPU partitioning is a currently - enabled feature in the cluster. CPU Partitioning means that this - cluster can support partitioning workloads to specific CPU Sets. - Valid values are "None" and "AllNodes". When omitted, the default - value is "None". The default value of "None" indicates that no nodes - will be setup with CPU partitioning. The "AllNodes" value indicates - that all nodes have been setup with CPU partitioning, and can then - be further configured via the PerformanceProfile API. - enum: - - None - - AllNodes - type: string - etcdDiscoveryDomain: - description: 'etcdDiscoveryDomain is the domain used to fetch the - SRV records for discovering etcd servers and clients. For more info: - https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery - deprecated: as of 4.7, this field is no longer set or honored. It - will be removed in a future release.' - type: string - infrastructureName: - description: infrastructureName uniquely identifies a cluster with - a human friendly name. Once set it should not be changed. Must be - of max length 27 and must have only alphanumeric or hyphen characters. - type: string - infrastructureTopology: - default: HighlyAvailable - description: 'infrastructureTopology expresses the expectations for - infrastructure services that do not run on control plane nodes, - usually indicated by a node selector for a `role` value other than - `master`. The default is ''HighlyAvailable'', which represents the - behavior operators have in a "normal" cluster. 
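For the vcenters and nodeNetworking stanzas above, a minimal single-vCenter sketch; the server name and CIDR are placeholders:

spec:
  platformSpec:
    type: VSphere
    vsphere:
      vcenters:                      # only a single vCenter is currently supported
      - server: vcenter.example.com
        port: 443
        datacenters:
        - dc1
      nodeNetworking:
        internal:
          networkSubnetCidr:         # CIDRs used to select status.addresses entries
          - 10.0.0.0/24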
The ''SingleReplica'' - mode will be used in single-node deployments and the operators should - not configure the operand for highly-available operation NOTE: External - topology mode is not applicable for this field.' - enum: - - HighlyAvailable - - SingleReplica - type: string - platform: - description: "platform is the underlying infrastructure provider for - the cluster. \n Deprecated: Use platformStatus.type instead." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - platformStatus: - description: platformStatus holds status information specific to the - underlying infrastructure provider. - properties: - alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba - Cloud infrastructure provider. - properties: - region: - description: region specifies the region for Alibaba Cloud - resources created for the cluster. - pattern: ^[0-9A-Za-z-]+$ - type: string - resourceGroupID: - description: resourceGroupID is the ID of the resource group - for the cluster. - pattern: ^(rg-[0-9A-Za-z]+)?$ - type: string - resourceTags: - description: resourceTags is a list of additional tags to - apply to Alibaba Cloud resources created for the cluster. - items: - description: AlibabaCloudResourceTag is the set of tags - to add to apply to resources. - properties: - key: - description: key is the key of the tag. - maxLength: 128 - minLength: 1 - type: string - value: - description: value is the value of the tag. - maxLength: 128 - minLength: 1 - type: string - required: - - key - - value - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - required: - - region - type: object - aws: - description: AWS contains settings specific to the Amazon Web - Services infrastructure provider. - properties: - region: - description: region holds the default AWS region for new AWS - resources created by the cluster. - type: string - resourceTags: - description: resourceTags is a list of additional tags to - apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html - for information on tagging AWS resources. AWS supports a - maximum of 50 tags per resource. OpenShift reserves 25 tags - for its use, leaving 25 tags available for the user. - items: - description: AWSResourceTag is a tag to apply to AWS resources - created for the cluster. - properties: - key: - description: key is the key of the tag - maxLength: 128 - minLength: 1 - pattern: ^[0-9A-Za-z_.:/=+-@]+$ - type: string - value: - description: value is the value of the tag. Some AWS - service do not support empty values. Since tags are - added to resources in many services, the length of - the tag value must meet the requirements of all services. - maxLength: 256 - minLength: 1 - pattern: ^[0-9A-Za-z_.:/=+-@]+$ - type: string - required: - - key - - value - type: object - maxItems: 25 - type: array - x-kubernetes-list-type: atomic - serviceEndpoints: - description: ServiceEndpoints list contains custom endpoints - which will override default service endpoint of AWS Services. - There must be only one ServiceEndpoint for a service. - items: - description: AWSServiceEndpoint store the configuration - of a custom url to override existing defaults of AWS Services. - properties: - name: - description: name is the name of the AWS service. 
The - list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html - This must be provided and cannot be empty. - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - pattern: ^https:// - type: string - type: object - type: array - x-kubernetes-list-type: atomic - type: object - azure: - description: Azure contains settings specific to the Azure infrastructure - provider. - properties: - armEndpoint: - description: armEndpoint specifies a URL to use for resource - management in non-soverign clouds such as Azure Stack. - type: string - cloudName: - description: cloudName is the name of the Azure cloud environment - which can be used to configure the Azure SDK with the appropriate - Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`. - enum: - - "" - - AzurePublicCloud - - AzureUSGovernmentCloud - - AzureChinaCloud - - AzureGermanCloud - - AzureStackCloud - type: string - networkResourceGroupName: - description: networkResourceGroupName is the Resource Group - for network resources like the Virtual Network and Subnets - used by the cluster. If empty, the value is same as ResourceGroupName. - type: string - resourceGroupName: - description: resourceGroupName is the Resource Group for new - Azure resources created for the cluster. - type: string - resourceTags: - description: resourceTags is a list of additional tags to - apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags - for information on tagging Azure resources. Due to limitations - on Automation, Content Delivery Network, DNS Azure resources, - a maximum of 15 tags may be applied. OpenShift reserves - 5 tags for internal use, allowing 10 tags for user configuration. - items: - description: AzureResourceTag is a tag to apply to Azure - resources created for the cluster. - properties: - key: - description: key is the key part of the tag. A tag key - can have a maximum of 128 characters and cannot be - empty. Key must begin with a letter, end with a letter, - number or underscore, and must contain only alphanumeric - characters and the following special characters `_ - . -`. - maxLength: 128 - minLength: 1 - pattern: ^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$ - type: string - value: - description: 'value is the value part of the tag. A - tag value can have a maximum of 256 characters and - cannot be empty. Value must contain only alphanumeric - characters and the following special characters `_ - + , - . / : ; < = > ? @`.' - maxLength: 256 - minLength: 1 - pattern: ^[0-9A-Za-z_.=+-@]+$ - type: string - required: - - key - - value - type: object - maxItems: 10 - type: array - x-kubernetes-list-type: atomic - x-kubernetes-validations: - - message: resourceTags are immutable and may only be configured - during installation - rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) - type: object - x-kubernetes-validations: - - message: resourceTags may only be configured during installation - rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) - || has(oldSelf.resourceTags) && has(self.resourceTags)' - baremetal: - description: BareMetal contains settings specific to the BareMetal - platform. 
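As an illustration of the AWS status fields above, a platformStatus carrying one user tag and one service endpoint override; the endpoint URL is a made-up example, not a real VPC endpoint:

status:
  platformStatus:
    type: AWS
    aws:
      region: us-east-1
      resourceTags:
      - key: team
        value: platform
      serviceEndpoints:
      - name: ec2                                    # must match ^[a-z0-9-]+$
        url: https://ec2.us-east-1.example.internal  # must be an https URL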
- properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on BareMetal platform which can be a - user-managed or openshift-managed load balancer that - is to be used for the OpenShift API and Ingress endpoints. - When set to OpenShiftManagedDefault the static pods - in charge of API and Ingress traffic load-balancing - defined in the machine config operator will be deployed. - When set to UserManaged these static pods will not be - deployed and it is expected that the load balancer is - configured out of band by the deployer. When omitted, - this means no opinion and the platform is left to choose - a reasonable default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for BareMetal deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. - type: string - type: object - equinixMetal: - description: EquinixMetal contains settings specific to the Equinix - Metal infrastructure provider. - properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - type: object - external: - description: External contains settings specific to the generic - External infrastructure provider. - properties: - cloudControllerManager: - description: cloudControllerManager contains settings specific - to the external Cloud Controller Manager (a.k.a. CCM or - CPI). When omitted, new nodes will be not tainted and no - extra initialization from the cloud controller manager is - expected. - properties: - state: - description: "state determines whether or not an external - Cloud Controller Manager is expected to be installed - within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager - \n Valid values are \"External\", \"None\" and omitted. - When set to \"External\", new nodes will be tainted - as uninitialized when created, preventing them from - running workloads until they are initialized by the - cloud controller manager. 
When omitted or set to \"None\", - new nodes will be not tainted and no extra initialization - from the cloud controller manager is expected." - enum: - - "" - - External - - None - type: string - x-kubernetes-validations: - - message: state is immutable once set - rule: self == oldSelf - type: object - x-kubernetes-validations: - - message: state may not be added or removed once set - rule: (has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) - && self.state != "External") - type: object - x-kubernetes-validations: - - message: cloudControllerManager may not be added or removed - once set - rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) - gcp: - description: GCP contains settings specific to the Google Cloud - Platform infrastructure provider. - properties: - cloudLoadBalancerConfig: - default: - dnsType: PlatformDefault - description: cloudLoadBalancerConfig is a union that contains - the IP addresses of API, API-Int and Ingress Load Balancers - created on the cloud platform. These values would not be - populated on on-prem platforms. These Load Balancer IPs - are used to configure the in-cluster DNS instances for API, - API-Int and Ingress services. `dnsType` is expected to be - set to `ClusterHosted` when these Load Balancer IP addresses - are populated and used. - nullable: true - properties: - clusterHosted: - description: clusterHosted holds the IP addresses of API, - API-Int and Ingress Load Balancers on Cloud Platforms. - The DNS solution hosted within the cluster use these - IP addresses to provide resolution for API, API-Int - and Ingress services. - properties: - apiIntLoadBalancerIPs: - description: apiIntLoadBalancerIPs holds Load Balancer - IPs for the internal API service. These Load Balancer - IP addresses can be IPv4 and/or IPv6 addresses. - Entries in the apiIntLoadBalancerIPs must be unique. - A maximum of 16 IP addresses are permitted. - format: ip - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 16 - type: array - x-kubernetes-list-type: set - apiLoadBalancerIPs: - description: apiLoadBalancerIPs holds Load Balancer - IPs for the API service. These Load Balancer IP - addresses can be IPv4 and/or IPv6 addresses. Could - be empty for private clusters. Entries in the apiLoadBalancerIPs - must be unique. A maximum of 16 IP addresses are - permitted. 
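The external cloudControllerManager contract above reduces to a small status stanza; a sketch of what an installer might record when an out-of-tree CCM is expected:

status:
  platformStatus:
    type: External
    external:
      cloudControllerManager:
        state: External    # new nodes stay tainted until the external CCM initializes them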
- format: ip - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 16 - type: array - x-kubernetes-list-type: set - ingressLoadBalancerIPs: - description: ingressLoadBalancerIPs holds IPs for - Ingress Load Balancers. These Load Balancer IP addresses - can be IPv4 and/or IPv6 addresses. Entries in the - ingressLoadBalancerIPs must be unique. A maximum - of 16 IP addresses are permitted. - format: ip - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 16 - type: array - x-kubernetes-list-type: set - type: object - dnsType: - default: PlatformDefault - description: dnsType indicates the type of DNS solution - in use within the cluster. Its default value of `PlatformDefault` - indicates that the cluster's DNS is the default provided - by the cloud platform. It can be set to `ClusterHosted` - to bypass the configuration of the cloud default DNS. - In this mode, the cluster needs to provide a self-hosted - DNS solution for the cluster's installation to succeed. - The cluster's use of the cloud's Load Balancers is unaffected - by this setting. The value is immutable after it has - been set at install time. 
Currently, there is no way - for the customer to add additional DNS entries into - the cluster hosted DNS. Enabling this functionality - allows the user to start their own DNS solution outside - the cluster after installation is complete. The customer - would be responsible for configuring this custom DNS - solution, and it can be run in addition to the in-cluster - DNS solution. - enum: - - ClusterHosted - - PlatformDefault - type: string - x-kubernetes-validations: - - message: dnsType is immutable - rule: oldSelf == '' || self == oldSelf - type: object - x-kubernetes-validations: - - message: clusterHosted is permitted only when dnsType is - ClusterHosted - rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' - ? !has(self.clusterHosted) : true' - projectID: - description: projectID is the Project ID for new GCP - resources created for the cluster. - type: string - region: - description: region holds the region for new GCP resources - created for the cluster. - type: string - resourceLabels: - description: resourceLabels is a list of additional labels - to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources - for information on labeling GCP resources. GCP supports - a maximum of 64 labels per resource. OpenShift reserves - 32 labels for internal use, allowing 32 labels for user - configuration. - items: - description: GCPResourceLabel is a label to apply to GCP - resources created for the cluster. - properties: - key: - description: key is the key part of the label. A label - key can have a maximum of 63 characters and cannot - be empty. Label key must begin with a lowercase letter, - and must contain only lowercase letters, numeric characters, - and the following special characters `_-`. Label key - must not have the reserved prefixes `kubernetes-io` - and `openshift-io`. - maxLength: 63 - minLength: 1 - pattern: ^[a-z][0-9a-z_-]{0,62}$ - type: string - x-kubernetes-validations: - - message: label keys must not start with either `openshift-io` - or `kubernetes-io` - rule: '!self.startsWith(''openshift-io'') && !self.startsWith(''kubernetes-io'')' - value: - description: value is the value part of the label. A - label value can have a maximum of 63 characters and - cannot be empty. Value must contain only lowercase - letters, numeric characters, and the following special - characters `_-`. - maxLength: 63 - minLength: 1 - pattern: ^[0-9a-z_-]{1,63}$ - type: string - required: - - key - - value - type: object - maxItems: 32 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - x-kubernetes-validations: - - message: resourceLabels are immutable and may only be configured - during installation - rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) - resourceTags: - description: resourceTags is a list of additional tags to - apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview - for information on tagging GCP resources. GCP supports a - maximum of 50 tags per resource. - items: - description: GCPResourceTag is a tag to apply to GCP resources - created for the cluster. - properties: - key: - description: key is the key part of the tag. A tag key - can have a maximum of 63 characters and cannot be - empty. Tag key must begin and end with an alphanumeric - character, and must contain only uppercase, lowercase - alphanumeric characters, and the following special - characters `._-`.
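To make the GCP status block above concrete, a populated Infrastructure status might look like the sketch below. This is illustrative only: platformStatus is written by the installer and cluster controllers rather than by users, and every ID, label, and address here is invented (IPv4 values use RFC 5737 documentation ranges).

status:
  platformStatus:
    type: GCP
    gcp:
      projectID: example-project         # hypothetical Project ID
      region: us-central1
      resourceLabels:
        - key: team                      # lowercase key, at most 63 characters
          value: gpu-ops
      cloudLoadBalancerConfig:
        dnsType: ClusterHosted           # clusterHosted is only permitted with this value
        clusterHosted:
          apiLoadBalancerIPs:
            - 203.0.113.10               # documentation address, not a real LB
          apiIntLoadBalancerIPs:
            - 10.0.0.10
          ingressLoadBalancerIPs:
            - 203.0.113.20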
- maxLength: 63 - minLength: 1 - pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$ - type: string - parentID: - description: 'parentID is the ID of the hierarchical - resource where the tags are defined, e.g. at the Organization - or the Project level. To find the Organization or - Project ID refer to the following pages: https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, - https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. - An OrganizationID must consist of decimal numbers, - and cannot have leading zeroes. A ProjectID must be - 6 to 30 characters in length, can only contain lowercase - letters, numbers, and hyphens, and must start with - a letter, and cannot end with a hyphen.' - maxLength: 32 - minLength: 1 - pattern: (^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$) - type: string - value: - description: value is the value part of the tag. A tag - value can have a maximum of 63 characters and cannot - be empty. Tag value must begin and end with an alphanumeric - character, and must contain only uppercase, lowercase - alphanumeric characters, and the following special - characters `_-.@%=+:,*#&(){}[]` and spaces. - maxLength: 63 - minLength: 1 - pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$ - type: string - required: - - key - - parentID - - value - type: object - maxItems: 50 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - x-kubernetes-validations: - - message: resourceTags are immutable and may only be configured - during installation - rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) - type: object - x-kubernetes-validations: - - message: resourceLabels may only be configured during installation - rule: '!has(oldSelf.resourceLabels) && !has(self.resourceLabels) - || has(oldSelf.resourceLabels) && has(self.resourceLabels)' - - message: resourceTags may only be configured during installation - rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) - || has(oldSelf.resourceTags) && has(self.resourceTags)' - ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud - infrastructure provider. - properties: - cisInstanceCRN: - description: CISInstanceCRN is the CRN of the Cloud Internet - Services instance managing the DNS zone for the cluster's - base domain - type: string - dnsInstanceCRN: - description: DNSInstanceCRN is the CRN of the DNS Services - instance managing the DNS zone for the cluster's base domain - type: string - location: - description: Location is where the cluster has been deployed - type: string - providerType: - description: ProviderType indicates the type of cluster that - was created - type: string - resourceGroupName: - description: ResourceGroupName is the Resource Group for new - IBMCloud resources created for the cluster. - type: string - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of an - IBM Cloud service. These endpoints are consumed by components - within the cluster to reach the respective IBM Cloud Services. - items: - description: IBMCloudServiceEndpoint stores the configuration - of a custom url to override existing defaults of IBM Cloud - Services. - properties: - name: - description: 'name is the name of the IBM Cloud service. 
- Possible values are: CIS, COS, DNSServices, GlobalSearch, - GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, - ResourceManager, or VPC. For example, the IBM Cloud - Private IAM service could be configured with the service - `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` - Whereas the IBM Cloud Private VPC service for US South - (Dallas) could be configured with the service `name` - of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`' - enum: - - CIS - - COS - - DNSServices - - GlobalSearch - - GlobalTagging - - HyperProtect - - IAM - - KeyProtect - - ResourceController - - ResourceManager - - VPC - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - type: string - x-kubernetes-validations: - - message: url must be a valid absolute URL - rule: isURL(self) - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - type: object - kubevirt: - description: Kubevirt contains settings specific to the kubevirt - infrastructure provider. - properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - type: object - nutanix: - description: Nutanix contains settings specific to the Nutanix - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. 
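Looking back at the IBMCloud serviceEndpoints schema above, a hypothetical populated status could take this shape. The two URLs are the private-endpoint examples quoted in the field description; the remaining values are invented:

status:
  platformStatus:
    type: IBMCloud
    ibmcloud:
      location: us-south                 # hypothetical deployment location
      providerType: VPC                  # hypothetical cluster type
      serviceEndpoints:
        - name: IAM                      # must be one of the enumerated service names
          url: https://private.iam.cloud.ibm.com
        - name: VPC
          url: https://us.south.private.iaas.cloud.ibm.com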
- format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on Nutanix platform which can be a user-managed - or openshift-managed load balancer that is to be used - for the OpenShift API and Ingress endpoints. When set - to OpenShiftManagedDefault the static pods in charge - of API and Ingress traffic load-balancing defined in - the machine config operator will be deployed. When set - to UserManaged these static pods will not be deployed - and it is expected that the load balancer is configured - out of band by the deployer. When omitted, this means - no opinion and the platform is left to choose a reasonable - default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - type: object - openstack: - description: OpenStack contains settings specific to the OpenStack - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - cloudName: - description: cloudName is the name of the desired OpenStack - cloud in the client configuration file (`clouds.yaml`). - type: string - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on OpenStack platform which can be a - user-managed or openshift-managed load balancer that - is to be used for the OpenShift API and Ingress endpoints. 
- When set to OpenShiftManagedDefault the static pods - in charge of API and Ingress traffic load-balancing - defined in the machine config operator will be deployed. - When set to UserManaged these static pods will not be - deployed and it is expected that the load balancer is - configured out of band by the deployer. When omitted, - this means no opinion and the platform is left to choose - a reasonable default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for OpenStack deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. - type: string - type: object - ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure - provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. 
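The loadBalancer union described here recurs for Nutanix, OpenStack, oVirt, and vSphere. As a sketch, a cluster that provides its own balancer instead of the operator-managed static pods would carry the following (platform choice and values are illustrative; the type is immutable once set):

status:
  platformStatus:
    type: OpenStack
    openstack:
      cloudName: mycloud                 # hypothetical entry in clouds.yaml
      loadBalancer:
        type: UserManaged                # default is OpenShiftManagedDefault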
- In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on Ovirt platform which can be a user-managed - or openshift-managed load balancer that is to be used - for the OpenShift API and Ingress endpoints. When set - to OpenShiftManagedDefault the static pods in charge - of API and Ingress traffic load-balancing defined in - the machine config operator will be deployed. When set - to UserManaged these static pods will not be deployed - and it is expected that the load balancer is configured - out of band by the deployer. When omitted, this means - no opinion and the platform is left to choose a reasonable - default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - nodeDNSIP: - description: 'deprecated: as of 4.6, this field is no longer - set or honored. It will be removed in a future release.' - type: string - type: object - powervs: - description: PowerVS contains settings specific to the Power Systems - Virtual Servers infrastructure provider. - properties: - cisInstanceCRN: - description: CISInstanceCRN is the CRN of the Cloud Internet - Services instance managing the DNS zone for the cluster's - base domain - type: string - dnsInstanceCRN: - description: DNSInstanceCRN is the CRN of the DNS Services - instance managing the DNS zone for the cluster's base domain - type: string - region: - description: region holds the default Power VS region for - new Power VS resources created by the cluster. - type: string - resourceGroup: - description: 'resourceGroup is the resource group name for - new IBMCloud resources created for a cluster. The resource - group specified here will be used by cluster-image-registry-operator - to set up a COS Instance in IBMCloud for the cluster registry. - More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. - When omitted, the image registry operator won''t be able - to configure storage, which results in the image registry - cluster operator not being in an available state.' - maxLength: 40 - pattern: ^[a-zA-Z0-9-_ ]+$ - type: string - x-kubernetes-validations: - - message: resourceGroup is immutable once set - rule: oldSelf == '' || self == oldSelf - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of a Power - VS service. 
- items: - description: PowervsServiceEndpoint stores the configuration - of a custom url to override existing defaults of PowerVS - Services. - properties: - name: - description: name is the name of the Power VS service. - A few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api - ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller - Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - format: uri - pattern: ^https:// - type: string - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - zone: - description: 'zone holds the default zone for the new Power - VS resources created by the cluster. Note: Currently only - single-zone OCP clusters are supported' - type: string - type: object - x-kubernetes-validations: - - message: cannot unset resourceGroup once set - rule: '!has(oldSelf.resourceGroup) || has(self.resourceGroup)' - type: - description: "type is the underlying infrastructure provider for - the cluster. This value controls whether infrastructure automation - such as service load balancers, dynamic volume provisioning, - machine creation and deletion, and other integrations are enabled. - If None, no infrastructure automation is enabled. Allowed values - are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", - \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", - \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components - may not support all platforms, and must handle unrecognized - platforms as None if they do not support that platform. \n This - value will be synced to the `status.platform` and `status.platformStatus.type`. - Currently this value cannot be changed once set." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - vsphere: - description: VSphere contains settings specific to the VSphere - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names.
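Following the PowerVS serviceEndpoints schema just above, an override entry might look like this sketch. The service name and URL are placeholders chosen only to satisfy the ^[a-z0-9-]+$ and ^https:// patterns, not known-good endpoints:

status:
  platformStatus:
    type: PowerVS
    powervs:
      region: dal                        # hypothetical region
      zone: dal12                        # hypothetical zone
      serviceEndpoints:
        - name: power-iaas               # lowercase per the name pattern
          url: https://dal.power-iaas.cloud.ibm.com   # placeholder URL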
\n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on VSphere platform which can be a user-managed - or openshift-managed load balancer that is to be used - for the OpenShift API and Ingress endpoints. When set - to OpenShiftManagedDefault the static pods in charge - of API and Ingress traffic load-balancing defined in - the machine config operator will be deployed. When set - to UserManaged these static pods will not be deployed - and it is expected that the load balancer is configured - out of band by the deployer. When omitted, this means - no opinion and the platform is left to choose a reasonable - default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for vSphere deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. 
- type: string - type: object - type: object - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml-patch b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml-patch deleted file mode 100644 index d127130ad..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml-patch +++ /dev/null @@ -1,24 +0,0 @@ -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/vcenters/items/properties/server/anyOf - value: - - format: ipv4 - - format: ipv6 - - format: hostname -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/failureDomains/items/properties/server/anyOf - value: - - format: ipv4 - - format: ipv6 - - format: hostname -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/external/properties/excludeNetworkSubnetCidr/items/format - value: cidr -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/external/properties/networkSubnetCidr/items/format - value: cidr -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/internal/properties/excludeNetworkSubnetCidr/items/format - value: cidr -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/internal/properties/networkSubnetCidr/items/format - value: cidr diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml deleted file mode 100644 index a3eb6efe3..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml +++ /dev/null @@ -1,1761 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: Default - name: infrastructures.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Infrastructure - listKind: InfrastructureList - plural: infrastructures - singular: infrastructure - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Infrastructure holds cluster-wide information about Infrastructure. - \ The canonical name is `cluster` \n Compatibility level 1: Stable within - a major release for a minimum of 12 months or 3 minor releases (whichever - is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - cloudConfig: - description: "cloudConfig is a reference to a ConfigMap containing - the cloud provider configuration file. This configuration file is - used to configure the Kubernetes cloud provider integration when - using the built-in cloud provider integration or the external cloud - controller manager. The namespace for this config map is openshift-config. - \n cloudConfig should only be consumed by the kube_cloud_config - controller. The controller is responsible for using the user configuration - in the spec for various platforms and combining that with the user - provided ConfigMap in this field to create a stitched kube cloud - config. The controller generates a ConfigMap `kube-cloud-config` - in `openshift-config-managed` namespace with the kube cloud config - stored in the `cloud.conf` key. All the clients are expected to use - the generated ConfigMap only." - properties: - key: - description: Key allows pointing to a specific key/value inside - of the configmap. This is useful for logical file references. - type: string - name: - type: string - type: object - platformSpec: - description: platformSpec holds desired information specific to the - underlying infrastructure provider. - properties: - alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba - Cloud infrastructure provider. - type: object - aws: - description: AWS contains settings specific to the Amazon Web - Services infrastructure provider. - properties: - serviceEndpoints: - description: serviceEndpoints list contains custom endpoints - which will override default service endpoint of AWS Services. - There must be only one ServiceEndpoint for a service. - items: - description: AWSServiceEndpoint stores the configuration - of a custom url to override existing defaults of AWS Services. - properties: - name: - description: name is the name of the AWS service. The - list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html - This must be provided and cannot be empty. - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - pattern: ^https:// - type: string - type: object - type: array - x-kubernetes-list-type: atomic - type: object - azure: - description: Azure contains settings specific to the Azure infrastructure - provider. - type: object - baremetal: - description: BareMetal contains settings specific to the BareMetal - platform. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking.
These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - equinixMetal: - description: EquinixMetal contains settings specific to the Equinix - Metal infrastructure provider. - type: object - external: - description: ExternalPlatformType represents generic infrastructure - provider. 
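To make the bare-metal validations above concrete: the CEL rule size(self) == 2 ? self.exists_one(x, x.contains(':')) : true allows at most one address per IP family, so a dual-stack platformSpec pairs exactly one IPv4 entry with one IPv6 entry. A hypothetical example using documentation addresses:

spec:
  platformSpec:
    type: BareMetal
    baremetal:
      apiServerInternalIPs:
        - 192.0.2.5                      # IPv4 (RFC 5737 documentation range)
        - fd00::5                        # IPv6; a second IPv4 here would fail the CEL rule
      ingressIPs:
        - 192.0.2.6
        - fd00::6
      machineNetworks:
        - 192.0.2.0/24                   # CIDR form, IPv4 or IPv6
        - fd00::/64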
Platform-specific components should be supplemented - separately. - properties: - platformName: - default: Unknown - description: PlatformName holds the arbitrary string representing - the infrastructure provider name, expected to be set at - the installation time. This field is solely for informational - and reporting purposes and is not expected to be used for - decision-making. - type: string - x-kubernetes-validations: - - message: platform name cannot be changed once set - rule: oldSelf == 'Unknown' || self == oldSelf - type: object - gcp: - description: GCP contains settings specific to the Google Cloud - Platform infrastructure provider. - type: object - ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud - infrastructure provider. - type: object - kubevirt: - description: Kubevirt contains settings specific to the kubevirt - infrastructure provider. - type: object - nutanix: - description: Nutanix contains settings specific to the Nutanix - infrastructure provider. - properties: - failureDomains: - description: failureDomains configures failure domains information - for the Nutanix platform. When set, the failure domains - defined here may be used to spread Machines across prism - element clusters to improve fault tolerance of the cluster. - items: - description: NutanixFailureDomain configures failure domain - information for the Nutanix platform. - properties: - cluster: - description: cluster is to identify the cluster (the - Prism Element under management of the Prism Central), - in which the Machine's VM will be created. The cluster - identifier (uuid or name) can be obtained from the - Prism Central console or using the prism_central API. - properties: - name: - description: name is the resource name in the PC. - It cannot be empty if the type is Name. - type: string - type: - description: type is the identifier type to use - for this resource. - enum: - - UUID - - Name - type: string - uuid: - description: uuid is the UUID of the resource in - the PC. It cannot be empty if the type is UUID. - type: string - required: - - type - type: object - x-kubernetes-validations: - - message: uuid configuration is required when type - is UUID, and forbidden otherwise - rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) - : !has(self.uuid)' - - message: name configuration is required when type - is Name, and forbidden otherwise - rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) - : !has(self.name)' - name: - description: name defines the unique name of a failure - domain. Name is required and must be at most 64 characters - in length. It must consist of only lower case alphanumeric - characters and hyphens (-). It must start and end - with an alphanumeric character. This value is arbitrary - and is used to identify the failure domain within - the platform. - maxLength: 64 - minLength: 1 - pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' - type: string - subnets: - description: subnets holds a list of identifiers (one - or more) of the cluster's network subnets for the - Machine's VM to connect to. The subnet identifiers - (uuid or name) can be obtained from the Prism Central - console or using the prism_central API. - items: - description: NutanixResourceIdentifier holds the identity - of a Nutanix PC resource (cluster, image, subnet, - etc.) - properties: - name: - description: name is the resource name in the - PC. It cannot be empty if the type is Name. 
- type: string - type: - description: type is the identifier type to use - for this resource. - enum: - - UUID - - Name - type: string - uuid: - description: uuid is the UUID of the resource - in the PC. It cannot be empty if the type is - UUID. - type: string - required: - - type - type: object - x-kubernetes-validations: - - message: uuid configuration is required when type - is UUID, and forbidden otherwise - rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) - : !has(self.uuid)' - - message: name configuration is required when type - is Name, and forbidden otherwise - rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) - : !has(self.name)' - maxItems: 1 - minItems: 1 - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - required: - - cluster - - name - - subnets - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - prismCentral: - description: prismCentral holds the endpoint address and port - to access the Nutanix Prism Central. When a cluster-wide - proxy is installed, by default, this endpoint will be accessed - via the proxy. Should you wish for communication with this - endpoint not to be proxied, please add the endpoint to the - proxy spec.noProxy list. - properties: - address: - description: address is the endpoint address (DNS name - or IP address) of the Nutanix Prism Central or Element - (cluster) - maxLength: 256 - type: string - port: - description: port is the port number to access the Nutanix - Prism Central or Element (cluster) - format: int32 - maximum: 65535 - minimum: 1 - type: integer - required: - - address - - port - type: object - prismElements: - description: prismElements holds one or more endpoint address - and port data to access the Nutanix Prism Elements (clusters) - of the Nutanix Prism Central. Currently we only support - one Prism Element (cluster) for an OpenShift cluster, where - all the Nutanix resources (VMs, subnets, volumes, etc.) - used in the OpenShift cluster are located. In the future, - we may support Nutanix resources (VMs, etc.) spread over - multiple Prism Elements (clusters) of the Prism Central. - items: - description: NutanixPrismElementEndpoint holds the name - and endpoint data for a Prism Element (cluster) - properties: - endpoint: - description: endpoint holds the endpoint address and - port data of the Prism Element (cluster). When a cluster-wide - proxy is installed, by default, this endpoint will - be accessed via the proxy. Should you wish for communication - with this endpoint not to be proxied, please add the - endpoint to the proxy spec.noProxy list. - properties: - address: - description: address is the endpoint address (DNS - name or IP address) of the Nutanix Prism Central - or Element (cluster) - maxLength: 256 - type: string - port: - description: port is the port number to access the - Nutanix Prism Central or Element (cluster) - format: int32 - maximum: 65535 - minimum: 1 - type: integer - required: - - address - - port - type: object - name: - description: name is the name of the Prism Element (cluster). - This value will correspond with the cluster field - configured on other resources (eg Machines, PVCs, - etc). 
- maxLength: 256 - type: string - required: - - endpoint - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - required: - - prismCentral - - prismElements - type: object - openstack: - description: OpenStack contains settings specific to the OpenStack - infrastructure provider. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
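Stepping back to the Nutanix platformSpec whose schema closes just above, a minimal hypothetical object satisfying its required fields and CEL rules might be:

spec:
  platformSpec:
    type: Nutanix
    nutanix:
      prismCentral:
        address: pc.example.com          # hypothetical Prism Central endpoint
        port: 9440
      prismElements:
        - name: pe-cluster-1             # hypothetical Prism Element (cluster)
          endpoint:
            address: pe1.example.com
            port: 9440
      failureDomains:
        - name: fd-1
          cluster:
            type: Name
            name: pe-cluster-1           # name required (and uuid forbidden) when type is Name
          subnets:                       # exactly one subnet is permitted
            - type: UUID
              uuid: 00000000-0000-0000-0000-000000000000   # placeholder UUID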
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure - provider. 
- type: object - powervs: - description: PowerVS contains settings specific to the IBM Power - Systems Virtual Servers infrastructure provider. - properties: - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of a Power - VS service. - items: - description: PowervsServiceEndpoint stores the configuration - of a custom url to override existing defaults of PowerVS - Services. - properties: - name: - description: name is the name of the Power VS service. - Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api - ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller - Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - format: uri - pattern: ^https:// - type: string - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - type: object - type: - description: type is the underlying infrastructure provider for - the cluster. This value controls whether infrastructure automation - such as service load balancers, dynamic volume provisioning, - machine creation and deletion, and other integrations are enabled. - If None, no infrastructure automation is enabled. Allowed values - are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", - "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", - "Nutanix" and "None". Individual components may not support - all platforms, and must handle unrecognized platforms as None - if they do not support that platform. - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - vsphere: - description: VSphere contains settings specific to the VSphere - infrastructure provider. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - failureDomains: - description: failureDomains contains the definition of region, - zone and the vCenter topology. If this is omitted failure - domains (regions and zones) will not be used. - items: - description: VSpherePlatformFailureDomainSpec holds the - region and zone failure domain and the vCenter topology - of that failure domain. - properties: - name: - description: name defines the arbitrary but unique name - of a failure domain. - maxLength: 256 - minLength: 1 - type: string - region: - description: region defines the name of a region tag - that will be attached to a vCenter datacenter. The - tag category in vCenter must be named openshift-region. - maxLength: 80 - minLength: 1 - type: string - server: - anyOf: - - format: ipv4 - - format: ipv6 - - format: hostname - description: server is the fully-qualified domain name - or the IP address of the vCenter server. --- - maxLength: 255 - minLength: 1 - type: string - topology: - description: Topology describes a given failure domain - using vSphere constructs - properties: - computeCluster: - description: computeCluster the absolute path of - the vCenter cluster in which virtual machine will - be located. The absolute path is of the form //host/. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/host/.*? - type: string - datacenter: - description: datacenter is the name of vCenter datacenter - in which virtual machines will be located. The - maximum length of the datacenter name is 80 characters. - maxLength: 80 - type: string - datastore: - description: datastore is the absolute path of the - datastore in which the virtual machine is located. - The absolute path is of the form //datastore/ - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/datastore/.*? - type: string - folder: - description: folder is the absolute path of the - folder where virtual machines are located. The - absolute path is of the form //vm/. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/vm/.*? 
- type: string - networks: - description: networks is the list of port group - network names within this failure domain. Currently, - we only support a single interface per RHCOS virtual - machine. The available networks (port groups) - can be listed using `govc ls 'network/*'` The - single interface should be the absolute path of - the form //network/. - items: - type: string - maxItems: 1 - minItems: 1 - type: array - x-kubernetes-list-type: atomic - resourcePool: - description: resourcePool is the absolute path of - the resource pool where virtual machines will - be created. The absolute path is of the form //host//Resources/. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/host/.*?/Resources.* - type: string - template: - description: "template is the full inventory path - of the virtual machine or template that will be - cloned when creating new machines in this failure - domain. The maximum length of the path is 2048 - characters. \n When omitted, the template will - be calculated by the control plane machineset - operator based on the region and zone defined - in VSpherePlatformFailureDomainSpec. For example, - for zone=zonea, region=region1, and infrastructure - name=test, the template path would be calculated - as //vm/test-rhcos-region1-zonea." - maxLength: 2048 - minLength: 1 - pattern: ^/.*?/vm/.*? - type: string - required: - - computeCluster - - datacenter - - datastore - - networks - type: object - zone: - description: zone defines the name of a zone tag that - will be attached to a vCenter cluster. The tag category - in vCenter must be named openshift-zone. - maxLength: 80 - minLength: 1 - type: string - required: - - name - - region - - server - - topology - - zone - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeNetworking: - description: nodeNetworking contains the definition of internal - and external network constraints for assigning the node's - networking. If this field is omitted, networking defaults - to the legacy address selection behavior which is to only - support a single address and return the first one found. - properties: - external: - description: external represents the network configuration - of the node that is externally routable. 
- properties: - excludeNetworkSubnetCidr: - description: excludeNetworkSubnetCidr IP addresses - in subnet ranges will be excluded when selecting - the IP address from the VirtualMachine's VM for - use in the status.addresses fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: atomic - network: - description: network VirtualMachine's VM Network names - that will be used to when searching for status.addresses - fields. Note that if internal.networkSubnetCIDR - and external.networkSubnetCIDR are not set, then - the vNIC associated to this network must only have - a single IP address assigned to it. The available - networks (port groups) can be listed using `govc - ls 'network/*'` - type: string - networkSubnetCidr: - description: networkSubnetCidr IP address on VirtualMachine's - network interfaces included in the fields' CIDRs - that will be used in respective status.addresses - fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: set - type: object - internal: - description: internal represents the network configuration - of the node that is routable only within the cluster. - properties: - excludeNetworkSubnetCidr: - description: excludeNetworkSubnetCidr IP addresses - in subnet ranges will be excluded when selecting - the IP address from the VirtualMachine's VM for - use in the status.addresses fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: atomic - network: - description: network VirtualMachine's VM Network names - that will be used to when searching for status.addresses - fields. Note that if internal.networkSubnetCIDR - and external.networkSubnetCIDR are not set, then - the vNIC associated to this network must only have - a single IP address assigned to it. The available - networks (port groups) can be listed using `govc - ls 'network/*'` - type: string - networkSubnetCidr: - description: networkSubnetCidr IP address on VirtualMachine's - network interfaces included in the fields' CIDRs - that will be used in respective status.addresses - fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - vcenters: - description: vcenters holds the connection details for services - to communicate with vCenter. Currently, only a single vCenter - is supported. --- - items: - description: VSpherePlatformVCenterSpec stores the vCenter - connection fields. This is used by the vSphere CCM. - properties: - datacenters: - description: The vCenter Datacenters in which the RHCOS - vm guests are located. This field will be used by - the Cloud Controller Manager. Each datacenter listed - here should be used within a topology. - items: - type: string - minItems: 1 - type: array - x-kubernetes-list-type: set - port: - description: port is the TCP port that will be used - to communicate to the vCenter endpoint. When omitted, - this means the user has no opinion and it is up to - the platform to choose a sensible default, which is - subject to change over time. - format: int32 - maximum: 32767 - minimum: 1 - type: integer - server: - anyOf: - - format: ipv4 - - format: ipv6 - - format: hostname - description: server is the fully-qualified domain name - or the IP address of the vCenter server. 
--- - maxLength: 255 - type: string - required: - - datacenters - - server - type: object - maxItems: 1 - minItems: 0 - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - apiServerInternalURI: - description: apiServerInternalURL is a valid URI with scheme 'https', - address and optionally a port (defaulting to 443). apiServerInternalURL - can be used by components like kubelets, to contact the Kubernetes - API server using the infrastructure provider rather than Kubernetes - networking. - type: string - apiServerURL: - description: apiServerURL is a valid URI with scheme 'https', address - and optionally a port (defaulting to 443). apiServerURL can be - used by components like the web console to tell users where to find - the Kubernetes API. - type: string - controlPlaneTopology: - default: HighlyAvailable - description: controlPlaneTopology expresses the expectations for operands - that normally run on control nodes. The default is 'HighlyAvailable', - which represents the behavior operators have in a "normal" cluster. - The 'SingleReplica' mode will be used in single-node deployments - and the operators should not configure the operand for highly-available - operation The 'External' mode indicates that the control plane is - hosted externally to the cluster and that its components are not - visible within the cluster. - enum: - - HighlyAvailable - - SingleReplica - - External - type: string - cpuPartitioning: - default: None - description: cpuPartitioning expresses if CPU partitioning is a currently - enabled feature in the cluster. CPU Partitioning means that this - cluster can support partitioning workloads to specific CPU Sets. - Valid values are "None" and "AllNodes". When omitted, the default - value is "None". The default value of "None" indicates that no nodes - will be setup with CPU partitioning. The "AllNodes" value indicates - that all nodes have been setup with CPU partitioning, and can then - be further configured via the PerformanceProfile API. - enum: - - None - - AllNodes - type: string - etcdDiscoveryDomain: - description: 'etcdDiscoveryDomain is the domain used to fetch the - SRV records for discovering etcd servers and clients. For more info: - https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery - deprecated: as of 4.7, this field is no longer set or honored. It - will be removed in a future release.' - type: string - infrastructureName: - description: infrastructureName uniquely identifies a cluster with - a human friendly name. Once set it should not be changed. Must be - of max length 27 and must have only alphanumeric or hyphen characters. - type: string - infrastructureTopology: - default: HighlyAvailable - description: 'infrastructureTopology expresses the expectations for - infrastructure services that do not run on control plane nodes, - usually indicated by a node selector for a `role` value other than - `master`. The default is ''HighlyAvailable'', which represents the - behavior operators have in a "normal" cluster. 
The ''SingleReplica'' - mode will be used in single-node deployments and the operators should - not configure the operand for highly-available operation NOTE: External - topology mode is not applicable for this field.' - enum: - - HighlyAvailable - - SingleReplica - type: string - platform: - description: "platform is the underlying infrastructure provider for - the cluster. \n Deprecated: Use platformStatus.type instead." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - platformStatus: - description: platformStatus holds status information specific to the - underlying infrastructure provider. - properties: - alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba - Cloud infrastructure provider. - properties: - region: - description: region specifies the region for Alibaba Cloud - resources created for the cluster. - pattern: ^[0-9A-Za-z-]+$ - type: string - resourceGroupID: - description: resourceGroupID is the ID of the resource group - for the cluster. - pattern: ^(rg-[0-9A-Za-z]+)?$ - type: string - resourceTags: - description: resourceTags is a list of additional tags to - apply to Alibaba Cloud resources created for the cluster. - items: - description: AlibabaCloudResourceTag is the set of tags - to add to apply to resources. - properties: - key: - description: key is the key of the tag. - maxLength: 128 - minLength: 1 - type: string - value: - description: value is the value of the tag. - maxLength: 128 - minLength: 1 - type: string - required: - - key - - value - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - required: - - region - type: object - aws: - description: AWS contains settings specific to the Amazon Web - Services infrastructure provider. - properties: - region: - description: region holds the default AWS region for new AWS - resources created by the cluster. - type: string - resourceTags: - description: resourceTags is a list of additional tags to - apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html - for information on tagging AWS resources. AWS supports a - maximum of 50 tags per resource. OpenShift reserves 25 tags - for its use, leaving 25 tags available for the user. - items: - description: AWSResourceTag is a tag to apply to AWS resources - created for the cluster. - properties: - key: - description: key is the key of the tag - maxLength: 128 - minLength: 1 - pattern: ^[0-9A-Za-z_.:/=+-@]+$ - type: string - value: - description: value is the value of the tag. Some AWS - service do not support empty values. Since tags are - added to resources in many services, the length of - the tag value must meet the requirements of all services. - maxLength: 256 - minLength: 1 - pattern: ^[0-9A-Za-z_.:/=+-@]+$ - type: string - required: - - key - - value - type: object - maxItems: 25 - type: array - x-kubernetes-list-type: atomic - serviceEndpoints: - description: ServiceEndpoints list contains custom endpoints - which will override default service endpoint of AWS Services. - There must be only one ServiceEndpoint for a service. - items: - description: AWSServiceEndpoint store the configuration - of a custom url to override existing defaults of AWS Services. - properties: - name: - description: name is the name of the AWS service. 
The - list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html - This must be provided and cannot be empty. - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - pattern: ^https:// - type: string - type: object - type: array - x-kubernetes-list-type: atomic - type: object - azure: - description: Azure contains settings specific to the Azure infrastructure - provider. - properties: - armEndpoint: - description: armEndpoint specifies a URL to use for resource - management in non-soverign clouds such as Azure Stack. - type: string - cloudName: - description: cloudName is the name of the Azure cloud environment - which can be used to configure the Azure SDK with the appropriate - Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`. - enum: - - "" - - AzurePublicCloud - - AzureUSGovernmentCloud - - AzureChinaCloud - - AzureGermanCloud - - AzureStackCloud - type: string - networkResourceGroupName: - description: networkResourceGroupName is the Resource Group - for network resources like the Virtual Network and Subnets - used by the cluster. If empty, the value is same as ResourceGroupName. - type: string - resourceGroupName: - description: resourceGroupName is the Resource Group for new - Azure resources created for the cluster. - type: string - resourceTags: - description: resourceTags is a list of additional tags to - apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags - for information on tagging Azure resources. Due to limitations - on Automation, Content Delivery Network, DNS Azure resources, - a maximum of 15 tags may be applied. OpenShift reserves - 5 tags for internal use, allowing 10 tags for user configuration. - items: - description: AzureResourceTag is a tag to apply to Azure - resources created for the cluster. - properties: - key: - description: key is the key part of the tag. A tag key - can have a maximum of 128 characters and cannot be - empty. Key must begin with a letter, end with a letter, - number or underscore, and must contain only alphanumeric - characters and the following special characters `_ - . -`. - maxLength: 128 - minLength: 1 - pattern: ^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$ - type: string - value: - description: 'value is the value part of the tag. A - tag value can have a maximum of 256 characters and - cannot be empty. Value must contain only alphanumeric - characters and the following special characters `_ - + , - . / : ; < = > ? @`.' - maxLength: 256 - minLength: 1 - pattern: ^[0-9A-Za-z_.=+-@]+$ - type: string - required: - - key - - value - type: object - maxItems: 10 - type: array - x-kubernetes-list-type: atomic - x-kubernetes-validations: - - message: resourceTags are immutable and may only be configured - during installation - rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) - type: object - x-kubernetes-validations: - - message: resourceTags may only be configured during installation - rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) - || has(oldSelf.resourceTags) && has(self.resourceTags)' - baremetal: - description: BareMetal contains settings specific to the BareMetal - platform. 
- properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for BareMetal deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. 
- type: string - type: object - equinixMetal: - description: EquinixMetal contains settings specific to the Equinix - Metal infrastructure provider. - properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - type: object - external: - description: External contains settings specific to the generic - External infrastructure provider. - properties: - cloudControllerManager: - description: cloudControllerManager contains settings specific - to the external Cloud Controller Manager (a.k.a. CCM or - CPI). When omitted, new nodes will be not tainted and no - extra initialization from the cloud controller manager is - expected. - properties: - state: - description: "state determines whether or not an external - Cloud Controller Manager is expected to be installed - within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager - \n Valid values are \"External\", \"None\" and omitted. - When set to \"External\", new nodes will be tainted - as uninitialized when created, preventing them from - running workloads until they are initialized by the - cloud controller manager. When omitted or set to \"None\", - new nodes will be not tainted and no extra initialization - from the cloud controller manager is expected." - enum: - - "" - - External - - None - type: string - x-kubernetes-validations: - - message: state is immutable once set - rule: self == oldSelf - type: object - x-kubernetes-validations: - - message: state may not be added or removed once set - rule: (has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) - && self.state != "External") - type: object - x-kubernetes-validations: - - message: cloudControllerManager may not be added or removed - once set - rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) - gcp: - description: GCP contains settings specific to the Google Cloud - Platform infrastructure provider. - properties: - projectID: - description: resourceGroupName is the Project ID for new GCP - resources created for the cluster. - type: string - region: - description: region holds the region for new GCP resources - created for the cluster. - type: string - type: object - ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud - infrastructure provider. 
- properties: - cisInstanceCRN: - description: CISInstanceCRN is the CRN of the Cloud Internet - Services instance managing the DNS zone for the cluster's - base domain - type: string - dnsInstanceCRN: - description: DNSInstanceCRN is the CRN of the DNS Services - instance managing the DNS zone for the cluster's base domain - type: string - location: - description: Location is where the cluster has been deployed - type: string - providerType: - description: ProviderType indicates the type of cluster that - was created - type: string - resourceGroupName: - description: ResourceGroupName is the Resource Group for new - IBMCloud resources created for the cluster. - type: string - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of an - IBM Cloud service. These endpoints are consumed by components - within the cluster to reach the respective IBM Cloud Services. - items: - description: IBMCloudServiceEndpoint stores the configuration - of a custom url to override existing defaults of IBM Cloud - Services. - properties: - name: - description: 'name is the name of the IBM Cloud service. - Possible values are: CIS, COS, DNSServices, GlobalSearch, - GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, - ResourceManager, or VPC. For example, the IBM Cloud - Private IAM service could be configured with the service - `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` - Whereas the IBM Cloud Private VPC service for US South - (Dallas) could be configured with the service `name` - of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`' - enum: - - CIS - - COS - - DNSServices - - GlobalSearch - - GlobalTagging - - HyperProtect - - IAM - - KeyProtect - - ResourceController - - ResourceManager - - VPC - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - type: string - x-kubernetes-validations: - - message: url must be a valid absolute URL - rule: isURL(self) - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - type: object - kubevirt: - description: Kubevirt contains settings specific to the kubevirt - infrastructure provider. - properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - type: object - nutanix: - description: Nutanix contains settings specific to the Nutanix - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. 
It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - type: object - openstack: - description: OpenStack contains settings specific to the OpenStack - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - cloudName: - description: cloudName is the name of the desired OpenStack - cloud in the client configuration file (`clouds.yaml`). - type: string - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. 
- properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on OpenStack platform which can be a - user-managed or openshift-managed load balancer that - is to be used for the OpenShift API and Ingress endpoints. - When set to OpenShiftManagedDefault the static pods - in charge of API and Ingress traffic load-balancing - defined in the machine config operator will be deployed. - When set to UserManaged these static pods will not be - deployed and it is expected that the load balancer is - configured out of band by the deployer. When omitted, - this means no opinion and the platform is left to choose - a reasonable default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for OpenStack deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. - type: string - type: object - ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure - provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." 
- type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: 'deprecated: as of 4.6, this field is no longer - set or honored. It will be removed in a future release.' - type: string - type: object - powervs: - description: PowerVS contains settings specific to the Power Systems - Virtual Servers infrastructure provider. - properties: - cisInstanceCRN: - description: CISInstanceCRN is the CRN of the Cloud Internet - Services instance managing the DNS zone for the cluster's - base domain - type: string - dnsInstanceCRN: - description: DNSInstanceCRN is the CRN of the DNS Services - instance managing the DNS zone for the cluster's base domain - type: string - region: - description: region holds the default Power VS region for - new Power VS resources created by the cluster. - type: string - resourceGroup: - description: 'resourceGroup is the resource group name for - new IBMCloud resources created for a cluster. The resource - group specified here will be used by cluster-image-registry-operator - to set up a COS Instance in IBMCloud for the cluster registry. - More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. - When omitted, the image registry operator won''t be able - to configure storage, which results in the image registry - cluster operator not being in an available state.' - maxLength: 40 - pattern: ^[a-zA-Z0-9-_ ]+$ - type: string - x-kubernetes-validations: - - message: resourceGroup is immutable once set - rule: oldSelf == '' || self == oldSelf - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of a Power - VS service. - items: - description: PowervsServiceEndpoint stores the configuration - of a custom url to override existing defaults of PowerVS - Services. - properties: - name: - description: name is the name of the Power VS service. - Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api - ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller - Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. 
- format: uri - pattern: ^https:// - type: string - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - zone: - description: 'zone holds the default zone for the new Power - VS resources created by the cluster. Note: Currently only - single-zone OCP clusters are supported' - type: string - type: object - x-kubernetes-validations: - - message: cannot unset resourceGroup once set - rule: '!has(oldSelf.resourceGroup) || has(self.resourceGroup)' - type: - description: "type is the underlying infrastructure provider for - the cluster. This value controls whether infrastructure automation - such as service load balancers, dynamic volume provisioning, - machine creation and deletion, and other integrations are enabled. - If None, no infrastructure automation is enabled. Allowed values - are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", - \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", - \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components - may not support all platforms, and must handle unrecognized - platforms as None if they do not support that platform. \n This - value will be synced with to the `status.platform` and `status.platformStatus.type`. - Currently this value cannot be changed once set." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - vsphere: - description: VSphere contains settings specific to the VSphere - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for vSphere deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. - type: string - type: object - type: object - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml-patch b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml-patch deleted file mode 100644 index d127130ad..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml-patch +++ /dev/null @@ -1,24 +0,0 @@ -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/vcenters/items/properties/server/anyOf - value: - - format: ipv4 - - format: ipv6 - - format: hostname -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/failureDomains/items/properties/server/anyOf - value: - - format: ipv4 - - format: ipv6 - - format: hostname -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/external/properties/excludeNetworkSubnetCidr/items/format - value: cidr -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/external/properties/networkSubnetCidr/items/format - value: cidr -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/internal/properties/excludeNetworkSubnetCidr/items/format - value: cidr -- op: add - path: 
/spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/internal/properties/networkSubnetCidr/items/format - value: cidr diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index 73205cfa1..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,2089 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: infrastructures.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Infrastructure - listKind: InfrastructureList - plural: infrastructures - singular: infrastructure - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Infrastructure holds cluster-wide information about Infrastructure. - \ The canonical name is `cluster` \n Compatibility level 1: Stable within - a major release for a minimum of 12 months or 3 minor releases (whichever - is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - cloudConfig: - description: "cloudConfig is a reference to a ConfigMap containing - the cloud provider configuration file. This configuration file is - used to configure the Kubernetes cloud provider integration when - using the built-in cloud provider integration or the external cloud - controller manager. The namespace for this config map is openshift-config. - \n cloudConfig should only be consumed by the kube_cloud_config - controller. The controller is responsible for using the user configuration - in the spec for various platforms and combining that with the user - provided ConfigMap in this field to create a stitched kube cloud - config. The controller generates a ConfigMap `kube-cloud-config` - in `openshift-config-managed` namespace with the kube cloud config - is stored in `cloud.conf` key. All the clients are expected to use - the generated ConfigMap only." - properties: - key: - description: Key allows pointing to a specific key/value inside - of the configmap. This is useful for logical file references. 
- type: string - name: - type: string - type: object - platformSpec: - description: platformSpec holds desired information specific to the - underlying infrastructure provider. - properties: - alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba - Cloud infrastructure provider. - type: object - aws: - description: AWS contains settings specific to the Amazon Web - Services infrastructure provider. - properties: - serviceEndpoints: - description: serviceEndpoints list contains custom endpoints - which will override default service endpoint of AWS Services. - There must be only one ServiceEndpoint for a service. - items: - description: AWSServiceEndpoint store the configuration - of a custom url to override existing defaults of AWS Services. - properties: - name: - description: name is the name of the AWS service. The - list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html - This must be provided and cannot be empty. - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - pattern: ^https:// - type: string - type: object - type: array - x-kubernetes-list-type: atomic - type: object - azure: - description: Azure contains settings specific to the Azure infrastructure - provider. - type: object - baremetal: - description: BareMetal contains settings specific to the BareMetal - platform. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. 
Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - equinixMetal: - description: EquinixMetal contains settings specific to the Equinix - Metal infrastructure provider. - type: object - external: - description: ExternalPlatformType represents generic infrastructure - provider. Platform-specific components should be supplemented - separately. - properties: - platformName: - default: Unknown - description: PlatformName holds the arbitrary string representing - the infrastructure provider name, expected to be set at - the installation time. This field is solely for informational - and reporting purposes and is not expected to be used for - decision-making. - type: string - x-kubernetes-validations: - - message: platform name cannot be changed once set - rule: oldSelf == 'Unknown' || self == oldSelf - type: object - gcp: - description: GCP contains settings specific to the Google Cloud - Platform infrastructure provider. - type: object - ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud - infrastructure provider. - type: object - kubevirt: - description: Kubevirt contains settings specific to the kubevirt - infrastructure provider. - type: object - nutanix: - description: Nutanix contains settings specific to the Nutanix - infrastructure provider. - properties: - failureDomains: - description: failureDomains configures failure domains information - for the Nutanix platform. When set, the failure domains - defined here may be used to spread Machines across prism - element clusters to improve fault tolerance of the cluster. - items: - description: NutanixFailureDomain configures failure domain - information for the Nutanix platform. 
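For orientation, a hypothetical BareMetal platformSpec consistent with the schema above might look as follows; all addresses are illustrative, and the dual-stack CEL rules admit at most one IPv4 and one IPv6 entry per list:
  platformSpec:
    type: BareMetal
    baremetal:
      apiServerInternalIPs:
      - 10.0.0.5            # illustrative IPv4 API VIP
      - fd00::5             # illustrative IPv6 API VIP; a second IPv4 entry would fail the CEL rule
      ingressIPs:
      - 10.0.0.6
      - fd00::6
      machineNetworks:      # CIDR form, IPv4 or IPv6
      - 10.0.0.0/24
      - fd00::/64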
- properties: - cluster: - description: cluster is to identify the cluster (the - Prism Element under management of the Prism Central), - in which the Machine's VM will be created. The cluster - identifier (uuid or name) can be obtained from the - Prism Central console or using the prism_central API. - properties: - name: - description: name is the resource name in the PC. - It cannot be empty if the type is Name. - type: string - type: - description: type is the identifier type to use - for this resource. - enum: - - UUID - - Name - type: string - uuid: - description: uuid is the UUID of the resource in - the PC. It cannot be empty if the type is UUID. - type: string - required: - - type - type: object - x-kubernetes-validations: - - message: uuid configuration is required when type - is UUID, and forbidden otherwise - rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) - : !has(self.uuid)' - - message: name configuration is required when type - is Name, and forbidden otherwise - rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) - : !has(self.name)' - name: - description: name defines the unique name of a failure - domain. Name is required and must be at most 64 characters - in length. It must consist of only lower case alphanumeric - characters and hyphens (-). It must start and end - with an alphanumeric character. This value is arbitrary - and is used to identify the failure domain within - the platform. - maxLength: 64 - minLength: 1 - pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' - type: string - subnets: - description: subnets holds a list of identifiers (one - or more) of the cluster's network subnets for the - Machine's VM to connect to. The subnet identifiers - (uuid or name) can be obtained from the Prism Central - console or using the prism_central API. - items: - description: NutanixResourceIdentifier holds the identity - of a Nutanix PC resource (cluster, image, subnet, - etc.) - properties: - name: - description: name is the resource name in the - PC. It cannot be empty if the type is Name. - type: string - type: - description: type is the identifier type to use - for this resource. - enum: - - UUID - - Name - type: string - uuid: - description: uuid is the UUID of the resource - in the PC. It cannot be empty if the type is - UUID. - type: string - required: - - type - type: object - x-kubernetes-validations: - - message: uuid configuration is required when type - is UUID, and forbidden otherwise - rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) - : !has(self.uuid)' - - message: name configuration is required when type - is Name, and forbidden otherwise - rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) - : !has(self.name)' - maxItems: 1 - minItems: 1 - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - required: - - cluster - - name - - subnets - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - prismCentral: - description: prismCentral holds the endpoint address and port - to access the Nutanix Prism Central. When a cluster-wide - proxy is installed, by default, this endpoint will be accessed - via the proxy. Should you wish for communication with this - endpoint not to be proxied, please add the endpoint to the - proxy spec.noProxy list. 
- properties: - address: - description: address is the endpoint address (DNS name - or IP address) of the Nutanix Prism Central or Element - (cluster) - maxLength: 256 - type: string - port: - description: port is the port number to access the Nutanix - Prism Central or Element (cluster) - format: int32 - maximum: 65535 - minimum: 1 - type: integer - required: - - address - - port - type: object - prismElements: - description: prismElements holds one or more endpoint address - and port data to access the Nutanix Prism Elements (clusters) - of the Nutanix Prism Central. Currently we only support - one Prism Element (cluster) for an OpenShift cluster, where - all the Nutanix resources (VMs, subnets, volumes, etc.) - used in the OpenShift cluster are located. In the future, - we may support Nutanix resources (VMs, etc.) spread over - multiple Prism Elements (clusters) of the Prism Central. - items: - description: NutanixPrismElementEndpoint holds the name - and endpoint data for a Prism Element (cluster) - properties: - endpoint: - description: endpoint holds the endpoint address and - port data of the Prism Element (cluster). When a cluster-wide - proxy is installed, by default, this endpoint will - be accessed via the proxy. Should you wish for communication - with this endpoint not to be proxied, please add the - endpoint to the proxy spec.noProxy list. - properties: - address: - description: address is the endpoint address (DNS - name or IP address) of the Nutanix Prism Central - or Element (cluster) - maxLength: 256 - type: string - port: - description: port is the port number to access the - Nutanix Prism Central or Element (cluster) - format: int32 - maximum: 65535 - minimum: 1 - type: integer - required: - - address - - port - type: object - name: - description: name is the name of the Prism Element (cluster). - This value will correspond with the cluster field - configured on other resources (eg Machines, PVCs, - etc). - maxLength: 256 - type: string - required: - - endpoint - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - required: - - prismCentral - - prismElements - type: object - openstack: - description: OpenStack contains settings specific to the OpenStack - infrastructure provider. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). 
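Similarly, a sketch of a Nutanix platformSpec built from the fields above; the names and UUID are hypothetical, and the CEL rules require uuid when type is UUID and name when type is Name:
  platformSpec:
    type: Nutanix
    nutanix:
      prismCentral:
        address: prismcentral.example.com   # DNS name or IP of Prism Central
        port: 9440
      prismElements:
      - name: pe-cluster-1                  # corresponds to the cluster field on Machines, PVCs, etc.
        endpoint:
          address: pe1.example.com
          port: 9440
      failureDomains:
      - name: fd-1
        cluster:
          type: Name
          name: pe-cluster-1                # name required (uuid forbidden) when type is Name
        subnets:
        - type: UUID
          uuid: 0005a0f1-8f43-a0f2-02b7-3cecef193712   # uuid required when type is UUID; value hypothetical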
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. 
Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure - provider. - type: object - powervs: - description: PowerVS contains settings specific to the IBM Power - Systems Virtual Servers infrastructure provider. - properties: - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of a Power - VS service. - items: - description: PowervsServiceEndpoint stores the configuration - of a custom url to override existing defaults of PowerVS - Services. - properties: - name: - description: name is the name of the Power VS service. - Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api - ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller - Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - format: uri - pattern: ^https:// - type: string - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - type: object - type: - description: type is the underlying infrastructure provider for - the cluster. This value controls whether infrastructure automation - such as service load balancers, dynamic volume provisioning, - machine creation and deletion, and other integrations are enabled. - If None, no infrastructure automation is enabled. 
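As a concrete reading of the PowerVS serviceEndpoints schema above, one illustrative override entry (the service name and URL are hypothetical):
  platformSpec:
    type: PowerVS
    powervs:
      serviceEndpoints:
      - name: iam                               # must match ^[a-z0-9-]+$
        url: https://private.iam.example.com    # must be an https URI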
Allowed values - are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", - "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", - "Nutanix" and "None". Individual components may not support - all platforms, and must handle unrecognized platforms as None - if they do not support that platform. - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - vsphere: - description: VSphere contains settings specific to the VSphere - infrastructure provider. - properties: - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IP addresses, - one from IPv4 family and one from IPv6. In single stack - clusters a single IP address is expected. When omitted, - values from the status.apiServerInternalIPs will be used. - Once set, the list cannot be completely removed (but its - second entry can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: apiServerInternalIPs must contain at most one IPv4 - address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - failureDomains: - description: failureDomains contains the definition of region, - zone and the vCenter topology. If this is omitted failure - domains (regions and zones) will not be used. - items: - description: VSpherePlatformFailureDomainSpec holds the - region and zone failure domain and the vCenter topology - of that failure domain. - properties: - name: - description: name defines the arbitrary but unique name - of a failure domain. - maxLength: 256 - minLength: 1 - type: string - region: - description: region defines the name of a region tag - that will be attached to a vCenter datacenter. The - tag category in vCenter must be named openshift-region. 
- maxLength: 80 - minLength: 1 - type: string - server: - anyOf: - - format: ipv4 - - format: ipv6 - - format: hostname - description: server is the fully-qualified domain name - or the IP address of the vCenter server. --- - maxLength: 255 - minLength: 1 - type: string - topology: - description: Topology describes a given failure domain - using vSphere constructs - properties: - computeCluster: - description: computeCluster the absolute path of - the vCenter cluster in which virtual machine will - be located. The absolute path is of the form //host/. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/host/.*? - type: string - datacenter: - description: datacenter is the name of vCenter datacenter - in which virtual machines will be located. The - maximum length of the datacenter name is 80 characters. - maxLength: 80 - type: string - datastore: - description: datastore is the absolute path of the - datastore in which the virtual machine is located. - The absolute path is of the form //datastore/ - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/datastore/.*? - type: string - folder: - description: folder is the absolute path of the - folder where virtual machines are located. The - absolute path is of the form //vm/. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/vm/.*? - type: string - networks: - description: networks is the list of port group - network names within this failure domain. Currently, - we only support a single interface per RHCOS virtual - machine. The available networks (port groups) - can be listed using `govc ls 'network/*'` The - single interface should be the absolute path of - the form //network/. - items: - type: string - maxItems: 1 - minItems: 1 - type: array - x-kubernetes-list-type: atomic - resourcePool: - description: resourcePool is the absolute path of - the resource pool where virtual machines will - be created. The absolute path is of the form //host//Resources/. - The maximum length of the path is 2048 characters. - maxLength: 2048 - pattern: ^/.*?/host/.*?/Resources.* - type: string - template: - description: "template is the full inventory path - of the virtual machine or template that will be - cloned when creating new machines in this failure - domain. The maximum length of the path is 2048 - characters. \n When omitted, the template will - be calculated by the control plane machineset - operator based on the region and zone defined - in VSpherePlatformFailureDomainSpec. For example, - for zone=zonea, region=region1, and infrastructure - name=test, the template path would be calculated - as //vm/test-rhcos-region1-zonea." - maxLength: 2048 - minLength: 1 - pattern: ^/.*?/vm/.*? - type: string - required: - - computeCluster - - datacenter - - datastore - - networks - type: object - zone: - description: zone defines the name of a zone tag that - will be attached to a vCenter cluster. The tag category - in vCenter must be named openshift-zone. - maxLength: 80 - minLength: 1 - type: string - required: - - name - - region - - server - - topology - - zone - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. 
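To see how the vSphere failure-domain fields compose, a hypothetical entry; the inventory paths are illustrative and must satisfy the patterns above (for example ^/.*?/host/.*? for computeCluster):
  platformSpec:
    type: VSphere
    vsphere:
      failureDomains:
      - name: us-east-1a
        region: us-east                 # datacenter tag in the openshift-region category
        zone: us-east-1a                # cluster tag in the openshift-zone category
        server: vcenter.example.com
        topology:
          datacenter: dc1
          computeCluster: /dc1/host/cluster1
          datastore: /dc1/datastore/datastore1
          networks:
          - /dc1/network/vm-network-1   # a single port group per RHCOS VM
          resourcePool: /dc1/host/cluster1/Resources/pool1   # optional
          folder: /dc1/vm/mycluster                          # optional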
In dual stack clusters this list contains two IP - addresses, one from IPv4 family and one from IPv6. In single - stack clusters a single IP address is expected. When omitted, - values from the status.ingressIPs will be used. Once set, - the list cannot be completely removed (but its second entry - can). - items: - description: IP is an IP address (for example, "10.0.0.0" - or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - x-kubernetes-validations: - - message: ingressIPs must contain at most one IPv4 address - and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. Each network is provided - in the CIDR format and should be IPv4 or IPv6, for example - "10.0.0.0/8" or "fd00::/8". - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeNetworking: - description: nodeNetworking contains the definition of internal - and external network constraints for assigning the node's - networking. If this field is omitted, networking defaults - to the legacy address selection behavior which is to only - support a single address and return the first one found. - properties: - external: - description: external represents the network configuration - of the node that is externally routable. - properties: - excludeNetworkSubnetCidr: - description: excludeNetworkSubnetCidr IP addresses - in subnet ranges will be excluded when selecting - the IP address from the VirtualMachine's VM for - use in the status.addresses fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: atomic - network: - description: network VirtualMachine's VM Network names - that will be used to when searching for status.addresses - fields. Note that if internal.networkSubnetCIDR - and external.networkSubnetCIDR are not set, then - the vNIC associated to this network must only have - a single IP address assigned to it. The available - networks (port groups) can be listed using `govc - ls 'network/*'` - type: string - networkSubnetCidr: - description: networkSubnetCidr IP address on VirtualMachine's - network interfaces included in the fields' CIDRs - that will be used in respective status.addresses - fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: set - type: object - internal: - description: internal represents the network configuration - of the node that is routable only within the cluster. - properties: - excludeNetworkSubnetCidr: - description: excludeNetworkSubnetCidr IP addresses - in subnet ranges will be excluded when selecting - the IP address from the VirtualMachine's VM for - use in the status.addresses fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: atomic - network: - description: network VirtualMachine's VM Network names - that will be used to when searching for status.addresses - fields. Note that if internal.networkSubnetCIDR - and external.networkSubnetCIDR are not set, then - the vNIC associated to this network must only have - a single IP address assigned to it. 
The available - networks (port groups) can be listed using `govc - ls 'network/*'` - type: string - networkSubnetCidr: - description: networkSubnetCidr IP address on VirtualMachine's - network interfaces included in the fields' CIDRs - that will be used in respective status.addresses - fields. --- - items: - format: cidr - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - vcenters: - description: vcenters holds the connection details for services - to communicate with vCenter. Currently, only a single vCenter - is supported. --- - items: - description: VSpherePlatformVCenterSpec stores the vCenter - connection fields. This is used by the vSphere CCM. - properties: - datacenters: - description: The vCenter Datacenters in which the RHCOS - vm guests are located. This field will be used by - the Cloud Controller Manager. Each datacenter listed - here should be used within a topology. - items: - type: string - minItems: 1 - type: array - x-kubernetes-list-type: set - port: - description: port is the TCP port that will be used - to communicate to the vCenter endpoint. When omitted, - this means the user has no opinion and it is up to - the platform to choose a sensible default, which is - subject to change over time. - format: int32 - maximum: 32767 - minimum: 1 - type: integer - server: - anyOf: - - format: ipv4 - - format: ipv6 - - format: hostname - description: server is the fully-qualified domain name - or the IP address of the vCenter server. --- - maxLength: 255 - type: string - required: - - datacenters - - server - type: object - maxItems: 1 - minItems: 0 - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-validations: - - message: apiServerInternalIPs list is required once set - rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' - - message: ingressIPs list is required once set - rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - apiServerInternalURI: - description: apiServerInternalURL is a valid URI with scheme 'https', - address and optionally a port (defaulting to 443). apiServerInternalURL - can be used by components like kubelets, to contact the Kubernetes - API server using the infrastructure provider rather than Kubernetes - networking. - type: string - apiServerURL: - description: apiServerURL is a valid URI with scheme 'https', address - and optionally a port (defaulting to 443). apiServerURL can be - used by components like the web console to tell users where to find - the Kubernetes API. - type: string - controlPlaneTopology: - default: HighlyAvailable - description: controlPlaneTopology expresses the expectations for operands - that normally run on control nodes. The default is 'HighlyAvailable', - which represents the behavior operators have in a "normal" cluster. - The 'SingleReplica' mode will be used in single-node deployments - and the operators should not configure the operand for highly-available - operation The 'External' mode indicates that the control plane is - hosted externally to the cluster and that its components are not - visible within the cluster. - enum: - - HighlyAvailable - - SingleReplica - - External - type: string - cpuPartitioning: - default: None - description: cpuPartitioning expresses if CPU partitioning is a currently - enabled feature in the cluster. 
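Rounding out the vSphere spec, a hypothetical vcenters and nodeNetworking stanza for the fields just described (CIDRs and names are illustrative):
  platformSpec:
    vsphere:
      vcenters:
      - server: vcenter.example.com
        port: 443
        datacenters:
        - dc1
      nodeNetworking:
        internal:
          networkSubnetCidr:
          - 10.0.0.0/24          # select node IPs for status.addresses from this range
        external:
          network: vm-network-1  # port group consulted when searching status.addresses
          excludeNetworkSubnetCidr:
          - 10.0.0.128/25        # addresses in this range are never selected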
CPU Partitioning means that this - cluster can support partitioning workloads to specific CPU Sets. - Valid values are "None" and "AllNodes". When omitted, the default - value is "None". The default value of "None" indicates that no nodes - will be setup with CPU partitioning. The "AllNodes" value indicates - that all nodes have been setup with CPU partitioning, and can then - be further configured via the PerformanceProfile API. - enum: - - None - - AllNodes - type: string - etcdDiscoveryDomain: - description: 'etcdDiscoveryDomain is the domain used to fetch the - SRV records for discovering etcd servers and clients. For more info: - https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery - deprecated: as of 4.7, this field is no longer set or honored. It - will be removed in a future release.' - type: string - infrastructureName: - description: infrastructureName uniquely identifies a cluster with - a human friendly name. Once set it should not be changed. Must be - of max length 27 and must have only alphanumeric or hyphen characters. - type: string - infrastructureTopology: - default: HighlyAvailable - description: 'infrastructureTopology expresses the expectations for - infrastructure services that do not run on control plane nodes, - usually indicated by a node selector for a `role` value other than - `master`. The default is ''HighlyAvailable'', which represents the - behavior operators have in a "normal" cluster. The ''SingleReplica'' - mode will be used in single-node deployments and the operators should - not configure the operand for highly-available operation NOTE: External - topology mode is not applicable for this field.' - enum: - - HighlyAvailable - - SingleReplica - type: string - platform: - description: "platform is the underlying infrastructure provider for - the cluster. \n Deprecated: Use platformStatus.type instead." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - platformStatus: - description: platformStatus holds status information specific to the - underlying infrastructure provider. - properties: - alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba - Cloud infrastructure provider. - properties: - region: - description: region specifies the region for Alibaba Cloud - resources created for the cluster. - pattern: ^[0-9A-Za-z-]+$ - type: string - resourceGroupID: - description: resourceGroupID is the ID of the resource group - for the cluster. - pattern: ^(rg-[0-9A-Za-z]+)?$ - type: string - resourceTags: - description: resourceTags is a list of additional tags to - apply to Alibaba Cloud resources created for the cluster. - items: - description: AlibabaCloudResourceTag is the set of tags - to add to apply to resources. - properties: - key: - description: key is the key of the tag. - maxLength: 128 - minLength: 1 - type: string - value: - description: value is the value of the tag. - maxLength: 128 - minLength: 1 - type: string - required: - - key - - value - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - required: - - region - type: object - aws: - description: AWS contains settings specific to the Amazon Web - Services infrastructure provider. 
- properties: - region: - description: region holds the default AWS region for new AWS resources created by the cluster. - type: string - resourceTags: - description: resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user. - items: - description: AWSResourceTag is a tag to apply to AWS resources created for the cluster. - properties: - key: - description: key is the key of the tag. - maxLength: 128 - minLength: 1 - pattern: ^[0-9A-Za-z_.:/=+-@]+$ - type: string - value: - description: value is the value of the tag. Some AWS services do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services. - maxLength: 256 - minLength: 1 - pattern: ^[0-9A-Za-z_.:/=+-@]+$ - type: string - required: - key - value - type: object - maxItems: 25 - type: array - x-kubernetes-list-type: atomic - serviceEndpoints: - description: serviceEndpoints list contains custom endpoints which will override the default service endpoints of AWS Services. There must be only one ServiceEndpoint for a service. - items: - description: AWSServiceEndpoint stores the configuration of a custom url to override existing defaults of AWS Services. - properties: - name: - description: name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty. - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is a fully qualified URI with scheme https that overrides the default generated endpoint for a client. This must be provided and cannot be empty. - pattern: ^https:// - type: string - type: object - type: array - x-kubernetes-list-type: atomic - type: object - azure: - description: Azure contains settings specific to the Azure infrastructure provider. - properties: - armEndpoint: - description: armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack. - type: string - cloudName: - description: cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`. - enum: - "" - AzurePublicCloud - AzureUSGovernmentCloud - AzureChinaCloud - AzureGermanCloud - AzureStackCloud - type: string - networkResourceGroupName: - description: networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is the same as ResourceGroupName. - type: string - resourceGroupName: - description: resourceGroupName is the Resource Group for new Azure resources created for the cluster. - type: string - resourceTags: - description: resourceTags is a list of additional tags to apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, and DNS Azure resources, a maximum of 15 tags may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.
- items: - description: AzureResourceTag is a tag to apply to Azure - resources created for the cluster. - properties: - key: - description: key is the key part of the tag. A tag key - can have a maximum of 128 characters and cannot be - empty. Key must begin with a letter, end with a letter, - number or underscore, and must contain only alphanumeric - characters and the following special characters `_ - . -`. - maxLength: 128 - minLength: 1 - pattern: ^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$ - type: string - value: - description: 'value is the value part of the tag. A - tag value can have a maximum of 256 characters and - cannot be empty. Value must contain only alphanumeric - characters and the following special characters `_ - + , - . / : ; < = > ? @`.' - maxLength: 256 - minLength: 1 - pattern: ^[0-9A-Za-z_.=+-@]+$ - type: string - required: - - key - - value - type: object - maxItems: 10 - type: array - x-kubernetes-list-type: atomic - x-kubernetes-validations: - - message: resourceTags are immutable and may only be configured - during installation - rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) - type: object - x-kubernetes-validations: - - message: resourceTags may only be configured during installation - rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) - || has(oldSelf.resourceTags) && has(self.resourceTags)' - baremetal: - description: BareMetal contains settings specific to the BareMetal - platform. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on BareMetal platform which can be a - user-managed or openshift-managed load balancer that - is to be used for the OpenShift API and Ingress endpoints. 
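By way of example, an Azure platformStatus consistent with the schema above; the values are hypothetical, and resourceTags are immutable once set at install time:
  platformStatus:
    type: Azure
    azure:
      cloudName: AzurePublicCloud
      resourceGroupName: mycluster-rg
      networkResourceGroupName: mycluster-network-rg
      resourceTags:
      - key: cost-center     # must start with a letter and end with a letter, number or underscore
        value: "4242"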
- When set to OpenShiftManagedDefault the static pods - in charge of API and Ingress traffic load-balancing - defined in the machine config operator will be deployed. - When set to UserManaged these static pods will not be - deployed and it is expected that the load balancer is - configured out of band by the deployer. When omitted, - this means no opinion and the platform is left to choose - a reasonable default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for BareMetal deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. - type: string - type: object - equinixMetal: - description: EquinixMetal contains settings specific to the Equinix - Metal infrastructure provider. - properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - type: object - external: - description: External contains settings specific to the generic - External infrastructure provider. 
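A minimal sketch of an External platformStatus; the state field and its CEL immutability rules are spelled out just below:
  platformStatus:
    type: External
    external:
      cloudControllerManager:
        state: External   # new nodes stay tainted until the external CCM initializes them; "" or None expects no CCM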
- properties: - cloudControllerManager: - description: cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI). When omitted, new nodes will not be tainted and no extra initialization from the cloud controller manager is expected. - properties: - state: - description: "state determines whether or not an external Cloud Controller Manager is expected to be installed within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager \n Valid values are \"External\", \"None\" and omitted. When set to \"External\", new nodes will be tainted as uninitialized when created, preventing them from running workloads until they are initialized by the cloud controller manager. When omitted or set to \"None\", new nodes will not be tainted and no extra initialization from the cloud controller manager is expected." - enum: - "" - External - None - type: string - x-kubernetes-validations: - message: state is immutable once set - rule: self == oldSelf - type: object - x-kubernetes-validations: - message: state may not be added or removed once set - rule: (has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) && self.state != "External") - type: object - x-kubernetes-validations: - message: cloudControllerManager may not be added or removed once set - rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) - gcp: - description: GCP contains settings specific to the Google Cloud Platform infrastructure provider. - properties: - cloudLoadBalancerConfig: - default: - dnsType: PlatformDefault - description: cloudLoadBalancerConfig is a union that contains the IP addresses of API, API-Int and Ingress Load Balancers created on the cloud platform. These values would not be populated on on-prem platforms. These Load Balancer IPs are used to configure the in-cluster DNS instances for API, API-Int and Ingress services. `dnsType` is expected to be set to `ClusterHosted` when these Load Balancer IP addresses are populated and used. - nullable: true - properties: - clusterHosted: - description: clusterHosted holds the IP addresses of API, API-Int and Ingress Load Balancers on Cloud Platforms. The DNS solution hosted within the cluster uses these IP addresses to provide resolution for API, API-Int and Ingress services. - properties: - apiIntLoadBalancerIPs: - description: apiIntLoadBalancerIPs holds Load Balancer IPs for the internal API service. These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. Entries in the apiIntLoadBalancerIPs must be unique. A maximum of 16 IP addresses are permitted. - format: ip - items: - description: IP is an IP address (for example, "10.0.0.0" or "fd00::").
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$) - type: string - maxItems: 16 - type: array - x-kubernetes-list-type: set - apiLoadBalancerIPs: - description: apiLoadBalancerIPs holds Load Balancer IPs for the API service. These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. Could be empty for private clusters. Entries in the apiLoadBalancerIPs must be unique. A maximum of 16 IP addresses are permitted. - format: ip - items: - description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$) - type: string - maxItems: 16 - type: array - x-kubernetes-list-type: set - ingressLoadBalancerIPs: - description: ingressLoadBalancerIPs holds IPs for Ingress Load Balancers. These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. Entries in the ingressLoadBalancerIPs must be unique. A maximum of 16 IP addresses are permitted. - format: ip - items: - description: IP is an IP address (for example, "10.0.0.0" or "fd00::").
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$) - type: string - maxItems: 16 - type: array - x-kubernetes-list-type: set - type: object - dnsType: - default: PlatformDefault - description: dnsType indicates the type of DNS solution in use within the cluster. Its default value of `PlatformDefault` indicates that the cluster's DNS is the default provided by the cloud platform. It can be set to `ClusterHosted` to bypass the configuration of the cloud default DNS. In this mode, the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed. The cluster's use of the cloud's Load Balancers is unaffected by this setting. The value is immutable after it has been set at install time. Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS. Enabling this functionality allows the user to start their own DNS solution outside the cluster after installation is complete. The customer would be responsible for configuring this custom DNS solution, and it can be run in addition to the in-cluster DNS solution. - enum: - ClusterHosted - PlatformDefault - type: string - x-kubernetes-validations: - message: dnsType is immutable - rule: oldSelf == '' || self == oldSelf - type: object - x-kubernetes-validations: - message: clusterHosted is permitted only when dnsType is ClusterHosted - rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' ? !has(self.clusterHosted) : true' - projectID: - description: projectID is the Project ID for new GCP resources created for the cluster. - type: string - region: - description: region holds the region for new GCP resources created for the cluster. - type: string - resourceLabels: - description: resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, allowing 32 labels for user configuration. - items: - description: GCPResourceLabel is a label to apply to GCP resources created for the cluster. - properties: - key: - description: key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty.
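Pulling the GCP status fields together, a hypothetical ClusterHosted configuration; addresses are illustrative, each list is capped at 16 entries, and clusterHosted is only permitted when dnsType is ClusterHosted:
  platformStatus:
    type: GCP
    gcp:
      projectID: my-project
      region: us-central1
      cloudLoadBalancerConfig:
        dnsType: ClusterHosted    # immutable once set; PlatformDefault uses the cloud's DNS
        clusterHosted:
          apiLoadBalancerIPs:
          - 34.120.0.10
          apiIntLoadBalancerIPs:
          - 10.0.0.10
          ingressLoadBalancerIPs:
          - 34.120.0.20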
Label key must begin with a lowercase letter, - and must contain only lowercase letters, numeric characters, - and the following special characters `_-`. Label key - must not have the reserved prefixes `kubernetes-io` - and `openshift-io`. - maxLength: 63 - minLength: 1 - pattern: ^[a-z][0-9a-z_-]{0,62}$ - type: string - x-kubernetes-validations: - - message: label keys must not start with either `openshift-io` - or `kubernetes-io` - rule: '!self.startsWith(''openshift-io'') && !self.startsWith(''kubernetes-io'')' - value: - description: value is the value part of the label. A - label value can have a maximum of 63 characters and - cannot be empty. Value must contain only lowercase - letters, numeric characters, and the following special - characters `_-`. - maxLength: 63 - minLength: 1 - pattern: ^[0-9a-z_-]{1,63}$ - type: string - required: - - key - - value - type: object - maxItems: 32 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - x-kubernetes-validations: - - message: resourceLabels are immutable and may only be configured - during installation - rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) - resourceTags: - description: resourceTags is a list of additional tags to - apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview - for information on tagging GCP resources. GCP supports a - maximum of 50 tags per resource. - items: - description: GCPResourceTag is a tag to apply to GCP resources - created for the cluster. - properties: - key: - description: key is the key part of the tag. A tag key - can have a maximum of 63 characters and cannot be - empty. Tag key must begin and end with an alphanumeric - character, and must contain only uppercase, lowercase - alphanumeric characters, and the following special - characters `._-`. - maxLength: 63 - minLength: 1 - pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$ - type: string - parentID: - description: 'parentID is the ID of the hierarchical - resource where the tags are defined, e.g. at the Organization - or the Project level. To find the Organization or - Project ID refer to the following pages: https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, - https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. - An OrganizationID must consist of decimal numbers, - and cannot have leading zeroes. A ProjectID must be - 6 to 30 characters in length, can only contain lowercase - letters, numbers, and hyphens, and must start with - a letter, and cannot end with a hyphen.' - maxLength: 32 - minLength: 1 - pattern: (^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$) - type: string - value: - description: value is the value part of the tag. A tag - value can have a maximum of 63 characters and cannot - be empty. Tag value must begin and end with an alphanumeric - character, and must contain only uppercase, lowercase - alphanumeric characters, and the following special - characters `_-.@%=+:,*#&(){}[]` and spaces. 
- maxLength: 63 - minLength: 1 - pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$ - type: string - required: - - key - - parentID - - value - type: object - maxItems: 50 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - x-kubernetes-validations: - - message: resourceTags are immutable and may only be configured - during installation - rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) - type: object - x-kubernetes-validations: - - message: resourceLabels may only be configured during installation - rule: '!has(oldSelf.resourceLabels) && !has(self.resourceLabels) - || has(oldSelf.resourceLabels) && has(self.resourceLabels)' - - message: resourceTags may only be configured during installation - rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) - || has(oldSelf.resourceTags) && has(self.resourceTags)' - ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud - infrastructure provider. - properties: - cisInstanceCRN: - description: CISInstanceCRN is the CRN of the Cloud Internet - Services instance managing the DNS zone for the cluster's - base domain - type: string - dnsInstanceCRN: - description: DNSInstanceCRN is the CRN of the DNS Services - instance managing the DNS zone for the cluster's base domain - type: string - location: - description: Location is where the cluster has been deployed - type: string - providerType: - description: ProviderType indicates the type of cluster that - was created - type: string - resourceGroupName: - description: ResourceGroupName is the Resource Group for new - IBMCloud resources created for the cluster. - type: string - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of an - IBM Cloud service. These endpoints are consumed by components - within the cluster to reach the respective IBM Cloud Services. - items: - description: IBMCloudServiceEndpoint stores the configuration - of a custom url to override existing defaults of IBM Cloud - Services. - properties: - name: - description: 'name is the name of the IBM Cloud service. - Possible values are: CIS, COS, DNSServices, GlobalSearch, - GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, - ResourceManager, or VPC. For example, the IBM Cloud - Private IAM service could be configured with the service - `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` - Whereas the IBM Cloud Private VPC service for US South - (Dallas) could be configured with the service `name` - of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`' - enum: - - CIS - - COS - - DNSServices - - GlobalSearch - - GlobalTagging - - HyperProtect - - IAM - - KeyProtect - - ResourceController - - ResourceManager - - VPC - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. - type: string - x-kubernetes-validations: - - message: url must be a valid absolute URL - rule: isURL(self) - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - type: object - kubevirt: - description: Kubevirt contains settings specific to the kubevirt - infrastructure provider. 
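
As an illustrative aside (none of these values appear in this patch), a GCP platform status satisfying the label and tag constraints described above could look like:

    platformStatus:
      type: GCP
      gcp:
        projectID: my-ocp-project        # hypothetical project
        region: us-central1              # hypothetical region
        resourceLabels:
          - key: team                    # lowercase start, then [0-9a-z_-], max 63 chars
            value: gpu-operator
        resourceTags:
          - key: CostCenter              # alphanumeric at both ends
            parentID: "1234567890"       # an OrganizationID: decimal digits, no leading zero
            value: ml-infra

and an IBM Cloud service-endpoint override, reusing the IAM example given in the description above:

    serviceEndpoints:
      - name: IAM
        url: https://private.iam.cloud.ibm.com
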
- properties: - apiServerInternalIP: - description: apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. - type: string - ingressIP: - description: ingressIP is an external IP which routes to the - default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. - type: string - type: object - nutanix: - description: Nutanix contains settings specific to the Nutanix - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on Nutanix platform which can be a user-managed - or openshift-managed load balancer that is to be used - for the OpenShift API and Ingress endpoints. When set - to OpenShiftManagedDefault the static pods in charge - of API and Ingress traffic load-balancing defined in - the machine config operator will be deployed. When set - to UserManaged these static pods will not be deployed - and it is expected that the load balancer is configured - out of band by the deployer. When omitted, this means - no opinion and the platform is left to choose a reasonable - default. The default value is OpenShiftManagedDefault. 
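
For reference, a minimal Kubevirt platform status with the two fields described above might be (the addresses are illustrative):

    platformStatus:
      type: KubeVirt
      kubevirt:
        apiServerInternalIP: 192.0.2.10   # the IP behind Infrastructure.status.apiServerInternalURI
        ingressIP: 192.0.2.20             # wildcard DNS target for default route host names
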
- enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - type: object - openstack: - description: OpenStack contains settings specific to the OpenStack - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - cloudName: - description: cloudName is the name of the desired OpenStack - cloud in the client configuration file (`clouds.yaml`). - type: string - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on OpenStack platform which can be a - user-managed or openshift-managed load balancer that - is to be used for the OpenShift API and Ingress endpoints. - When set to OpenShiftManagedDefault the static pods - in charge of API and Ingress traffic load-balancing - defined in the machine config operator will be deployed. - When set to UserManaged these static pods will not be - deployed and it is expected that the load balancer is - configured out of band by the deployer. When omitted, - this means no opinion and the platform is left to choose - a reasonable default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). 
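
The Nutanix and OpenStack entries share the loadBalancer shape just described; a sketch selecting a user-managed load balancer would be:

    loadBalancer:
      type: UserManaged   # the OpenShiftManagedDefault static pods are then not deployed

Omitting the field leaves the default, OpenShiftManagedDefault, in place, and the type is immutable once set.
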
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for OpenStack deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. - type: string - type: object - ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure - provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. 
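
Putting the dual-stack wording above into a concrete, hypothetical OpenStack status fragment:

    apiServerInternalIPs:
      - 10.0.0.5        # IPv4 entry
      - fd00::5         # IPv6 entry; at most two items in dual-stack clusters
    ingressIPs:
      - 10.0.0.6
      - fd00::6
    machineNetworks:
      - 10.0.0.0/16     # CIDR notation, as in the "10.0.0.0/8" example above
      - fd00::/48
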
- format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. - properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on Ovirt platform which can be a user-managed - or openshift-managed load balancer that is to be used - for the OpenShift API and Ingress endpoints. When set - to OpenShiftManagedDefault the static pods in charge - of API and Ingress traffic load-balancing defined in - the machine config operator will be deployed. When set - to UserManaged these static pods will not be deployed - and it is expected that the load balancer is configured - out of band by the deployer. When omitted, this means - no opinion and the platform is left to choose a reasonable - default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - nodeDNSIP: - description: 'deprecated: as of 4.6, this field is no longer - set or honored. It will be removed in a future release.' - type: string - type: object - powervs: - description: PowerVS contains settings specific to the Power Systems - Virtual Servers infrastructure provider. - properties: - cisInstanceCRN: - description: CISInstanceCRN is the CRN of the Cloud Internet - Services instance managing the DNS zone for the cluster's - base domain - type: string - dnsInstanceCRN: - description: DNSInstanceCRN is the CRN of the DNS Services - instance managing the DNS zone for the cluster's base domain - type: string - region: - description: region holds the default Power VS region for - new Power VS resources created by the cluster. - type: string - resourceGroup: - description: 'resourceGroup is the resource group name for - new IBMCloud resources created for a cluster. The resource - group specified here will be used by cluster-image-registry-operator - to set up a COS Instance in IBMCloud for the cluster registry. - More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. - When omitted, the image registry operator won''t be able - to configure storage, which results in the image registry - cluster operator not being in an available state.' - maxLength: 40 - pattern: ^[a-zA-Z0-9-_ ]+$ - type: string - x-kubernetes-validations: - - message: resourceGroup is immutable once set - rule: oldSelf == '' || self == oldSelf - serviceEndpoints: - description: serviceEndpoints is a list of custom endpoints - which will override the default service endpoints of a Power - VS service. - items: - description: PowervsServiceEndpoint stores the configuration - of a custom url to override existing defaults of PowerVS - Services. - properties: - name: - description: name is the name of the Power VS service. - Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api - ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller - Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ - type: string - url: - description: url is fully qualified URI with scheme - https, that overrides the default generated endpoint - for a client. This must be provided and cannot be - empty. 
- format: uri - pattern: ^https:// - type: string - required: - - name - - url - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - zone: - description: 'zone holds the default zone for the new Power - VS resources created by the cluster. Note: Currently only - single-zone OCP clusters are supported' - type: string - type: object - x-kubernetes-validations: - - message: cannot unset resourceGroup once set - rule: '!has(oldSelf.resourceGroup) || has(self.resourceGroup)' - type: - description: "type is the underlying infrastructure provider for - the cluster. This value controls whether infrastructure automation - such as service load balancers, dynamic volume provisioning, - machine creation and deletion, and other integrations are enabled. - If None, no infrastructure automation is enabled. Allowed values - are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", - \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", - \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components - may not support all platforms, and must handle unrecognized - platforms as None if they do not support that platform. \n This - value will be synced with to the `status.platform` and `status.platformStatus.type`. - Currently this value cannot be changed once set." - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - vsphere: - description: VSphere contains settings specific to the VSphere - infrastructure provider. - properties: - apiServerInternalIP: - description: "apiServerInternalIP is an IP address to contact - the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. It is the IP that the - Infrastructure.status.apiServerInternalURI points to. It - is the IP for a self-hosted load balancer in front of the - API servers. \n Deprecated: Use APIServerInternalIPs instead." - type: string - apiServerInternalIPs: - description: apiServerInternalIPs are the IP addresses to - contact the Kubernetes API server that can be used by components - inside the cluster, like kubelets using the infrastructure - rather than Kubernetes networking. These are the IPs for - a self-hosted load balancer in front of the API servers. - In dual stack clusters this list contains two IPs otherwise - only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - ingressIP: - description: "ingressIP is an external IP which routes to - the default ingress controller. The IP is a suitable target - of a wildcard DNS record used to resolve default route host - names. \n Deprecated: Use IngressIPs instead." - type: string - ingressIPs: - description: ingressIPs are the external IPs which route to - the default ingress controller. The IPs are suitable targets - of a wildcard DNS record used to resolve default route host - names. In dual stack clusters this list contains two IPs - otherwise only one. - format: ip - items: - type: string - maxItems: 2 - type: array - x-kubernetes-list-type: set - loadBalancer: - default: - type: OpenShiftManagedDefault - description: loadBalancer defines how the load balancer used - by the cluster is configured. 
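
A hypothetical PowerVS status honoring the constraints above (the names and URL are placeholders, not values from this patch) might read:

    platformStatus:
      type: PowerVS
      powervs:
        region: us-south
        zone: us-south-1                # only single-zone clusters are supported, per the note above
        resourceGroup: ocp-powervs-rg   # immutable once set
        serviceEndpoints:
          - name: power-cloud           # must match ^[a-z0-9-]+$
            url: https://example.cloud.ibm.com   # https scheme is required
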
- properties: - type: - default: OpenShiftManagedDefault - description: type defines the type of load balancer used - by the cluster on VSphere platform which can be a user-managed - or openshift-managed load balancer that is to be used - for the OpenShift API and Ingress endpoints. When set - to OpenShiftManagedDefault the static pods in charge - of API and Ingress traffic load-balancing defined in - the machine config operator will be deployed. When set - to UserManaged these static pods will not be deployed - and it is expected that the load balancer is configured - out of band by the deployer. When omitted, this means - no opinion and the platform is left to choose a reasonable - default. The default value is OpenShiftManagedDefault. - enum: - - OpenShiftManagedDefault - - UserManaged - type: string - x-kubernetes-validations: - - message: type is immutable once set - rule: oldSelf == '' || self == oldSelf - type: object - machineNetworks: - description: machineNetworks are IP networks used to connect - all the OpenShift cluster nodes. - items: - description: CIDR is an IP address range in CIDR notation - (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) - type: string - maxItems: 32 - type: array - x-kubernetes-list-type: set - nodeDNSIP: - description: nodeDNSIP is the IP address for the internal - DNS used by the nodes. Unlike the one managed by the DNS - operator, `NodeDNSIP` provides name resolution for the nodes - themselves. There is no DNS-as-a-service for vSphere deployments. - In order to minimize necessary changes to the datacenter - DNS, a DNS service is hosted as a static pod to serve those - hostnames to the nodes in the cluster. 
- type: string - type: object - type: object - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml-patch b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml-patch deleted file mode 100644 index d127130ad..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml-patch +++ /dev/null @@ -1,24 +0,0 @@ -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/vcenters/items/properties/server/anyOf - value: - - format: ipv4 - - format: ipv6 - - format: hostname -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/failureDomains/items/properties/server/anyOf - value: - - format: ipv4 - - format: ipv6 - - format: hostname -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/external/properties/excludeNetworkSubnetCidr/items/format - value: cidr -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/external/properties/networkSubnetCidr/items/format - value: cidr -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/internal/properties/excludeNetworkSubnetCidr/items/format - value: cidr -- op: add - path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/internal/properties/networkSubnetCidr/items/format - value: cidr diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml deleted file mode 100644 index c582dccb1..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml +++ /dev/null @@ -1,553 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: ingresses.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Ingress - listKind: IngressList - plural: ingresses - singular: ingress - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Ingress holds cluster-wide information about ingress, including - the default ingress domain used for routes. The canonical name is `cluster`. - \n Compatibility level 1: Stable within a major release for a minimum of - 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - appsDomain: - description: appsDomain is an optional domain to use instead of the - one specified in the domain field when a Route is created without - specifying an explicit host. If appsDomain is nonempty, this value - is used to generate default host values for Route. Unlike domain, - appsDomain may be modified after installation. This assumes a new - ingresscontroller has been setup with a wildcard certificate. - type: string - componentRoutes: - description: "componentRoutes is an optional list of routes that are - managed by OpenShift components that a cluster-admin is able to - configure the hostname and serving certificate for. The namespace - and name of each route in this list should match an existing entry - in the status.componentRoutes list. \n To determine the set of configurable - Routes, look at namespace and name of entries in the .status.componentRoutes - list, where participating operators write the status of configurable - routes." - items: - description: ComponentRouteSpec allows for configuration of a route's - hostname and serving certificate. - properties: - hostname: - description: hostname is the hostname that should be used by - the route. - pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ - type: string - name: - description: "name is the logical name of the route to customize. - \n The namespace and name of this componentRoute must match - a corresponding entry in the list of status.componentRoutes - if the route is to be customized." - maxLength: 256 - minLength: 1 - type: string - namespace: - description: "namespace is the namespace of the route to customize. - \n The namespace and name of this componentRoute must match - a corresponding entry in the list of status.componentRoutes - if the route is to be customized." - maxLength: 63 - minLength: 1 - pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - type: string - servingCertKeyPairSecret: - description: servingCertKeyPairSecret is a reference to a secret - of type `kubernetes.io/tls` in the openshift-config namespace. - The serving cert/key pair must match and will be used by the - operator to fulfill the intent of serving with this name. - If the custom hostname uses the default routing suffix of - the cluster, the Secret specification for a serving certificate - will not be needed. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - required: - - hostname - - name - - namespace - type: object - type: array - x-kubernetes-list-map-keys: - - namespace - - name - x-kubernetes-list-type: map - domain: - description: "domain is used to generate a default host name for a - route when the route's host name is empty. 
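
To make the componentRoutes contract above concrete: a cluster-admin customizing the console route (a common case; the names here are assumptions, not taken from this patch) would add an entry like:

    spec:
      componentRoutes:
        - name: console                      # must match an entry in status.componentRoutes
          namespace: openshift-console
          hostname: console.apps.example.com
          servingCertKeyPairSecret:
            name: console-custom-tls         # kubernetes.io/tls secret in the openshift-config namespace
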
The generated host name - will follow this pattern: \"..\". - \n It is also used as the default wildcard domain suffix for ingress. - The default ingresscontroller domain will follow this pattern: \"*.\". - \n Once set, changing domain is not currently supported." - type: string - loadBalancer: - description: loadBalancer contains the load balancer details in general - which are not only specific to the underlying infrastructure provider - of the current cluster and are required for Ingress Controller to - work on OpenShift. - properties: - platform: - description: platform holds configuration specific to the underlying - infrastructure provider for the ingress load balancers. When - omitted, this means the user has no opinion and the platform - is left to choose reasonable defaults. These defaults are subject - to change over time. - properties: - aws: - description: aws contains settings specific to the Amazon - Web Services infrastructure provider. - properties: - type: - description: "type allows user to set a load balancer - type. When this field is set the default ingresscontroller - will get created using the specified LBType. If this - field is not set then the default ingress controller - of LBType Classic will be created. Valid values are: - \n * \"Classic\": A Classic Load Balancer that makes - routing decisions at either the transport layer (TCP/SSL) - or the application layer (HTTP/HTTPS). See the following - for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb - \n * \"NLB\": A Network Load Balancer that makes routing - decisions at the transport layer (TCP/SSL). See the - following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb" - enum: - - NLB - - Classic - type: string - required: - - type - type: object - type: - description: type is the underlying infrastructure provider - for the cluster. Allowed values are "AWS", "Azure", "BareMetal", - "GCP", "Libvirt", "OpenStack", "VSphere", "oVirt", "KubeVirt", - "EquinixMetal", "PowerVS", "AlibabaCloud", "Nutanix" and - "None". Individual components may not support all platforms, - and must handle unrecognized platforms as None if they do - not support that platform. - enum: - - "" - - AWS - - Azure - - BareMetal - - GCP - - Libvirt - - OpenStack - - None - - VSphere - - oVirt - - IBMCloud - - KubeVirt - - EquinixMetal - - PowerVS - - AlibabaCloud - - Nutanix - - External - type: string - type: object - type: object - requiredHSTSPolicies: - description: "requiredHSTSPolicies specifies HSTS policies that are - required to be set on newly created or updated routes matching - the domainPattern/s and namespaceSelector/s that are specified in - the policy. Each requiredHSTSPolicy must have at least a domainPattern - and a maxAge to validate a route HSTS Policy route annotation, and - affect route admission. \n A candidate route is checked for HSTS - Policies if it has the HSTS Policy route annotation: \"haproxy.router.openshift.io/hsts_header\" - E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains - \n - For each candidate route, if it matches a requiredHSTSPolicy - domainPattern and optional namespaceSelector, then the maxAge, preloadPolicy, - and includeSubdomainsPolicy must be valid to be admitted. Otherwise, - the route is rejected. 
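
The HSTS route annotation quoted above, written out on a Route manifest (the route name is hypothetical; the annotation value is verbatim from the description):

    apiVersion: route.openshift.io/v1
    kind: Route
    metadata:
      name: my-route
      annotations:
        haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains
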
- The first match, by domainPattern and optional - namespaceSelector, in the ordering of the RequiredHSTSPolicies determines - the route's admission status. - If the candidate route doesn't match - any requiredHSTSPolicy domainPattern and optional namespaceSelector, - then it may use any HSTS Policy annotation. \n The HSTS policy configuration - may be changed after routes have already been created. An update - to a previously admitted route may then fail if the updated route - does not conform to the updated HSTS policy configuration. However, - changing the HSTS policy configuration will not cause a route that - is already admitted to stop working. \n Note that if there are no - RequiredHSTSPolicies, any HSTS Policy annotation on the route is - valid." - items: - properties: - domainPatterns: - description: "domainPatterns is a list of domains for which - the desired HSTS annotations are required. If domainPatterns - is specified and a route is created with a spec.host matching - one of the domains, the route must specify the HSTS Policy - components described in the matching RequiredHSTSPolicy. \n - The use of wildcards is allowed like this: *.foo.com matches - everything under foo.com. foo.com only matches foo.com, so - to cover foo.com and everything under it, you must specify - *both*." - items: - type: string - minItems: 1 - type: array - includeSubDomainsPolicy: - description: 'includeSubDomainsPolicy means the HSTS Policy - should apply to any subdomains of the host''s domain name. Thus, - for the host bar.foo.com, if includeSubDomainsPolicy was set - to RequireIncludeSubDomains: - the host app.bar.foo.com would - inherit the HSTS Policy of bar.foo.com - the host bar.foo.com - would inherit the HSTS Policy of bar.foo.com - the host foo.com - would NOT inherit the HSTS Policy of bar.foo.com - the host - def.foo.com would NOT inherit the HSTS Policy of bar.foo.com' - enum: - - RequireIncludeSubDomains - - RequireNoIncludeSubDomains - - NoOpinion - type: string - maxAge: - description: maxAge is the delta time range in seconds during - which hosts are regarded as HSTS hosts. If set to 0, it negates - the effect, and hosts are removed as HSTS hosts. If set to - 0 and includeSubdomains is specified, all subdomains of the - host are also removed as HSTS hosts. maxAge is a time-to-live - value, and if this policy is not refreshed on a client, the - HSTS policy will eventually expire on that client. - properties: - largestMaxAge: - description: The largest allowed value (in seconds) of the - RequiredHSTSPolicy max-age This value can be left unspecified, - in which case no upper limit is enforced. - format: int32 - maximum: 2147483647 - minimum: 0 - type: integer - smallestMaxAge: - description: The smallest allowed value (in seconds) of - the RequiredHSTSPolicy max-age Setting max-age=0 allows - the deletion of an existing HSTS header from a host. This - is a necessary tool for administrators to quickly correct - mistakes. This value can be left unspecified, in which - case no lower limit is enforced. - format: int32 - maximum: 2147483647 - minimum: 0 - type: integer - type: object - namespaceSelector: - description: namespaceSelector specifies a label selector such - that the policy applies only to those routes that are in namespaces - with labels that match the selector, and are in one of the - DomainPatterns. Defaults to the empty LabelSelector, which - matches everything. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. 
The requirements are ANDed. - items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. This - array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - preloadPolicy: - description: preloadPolicy directs the client to include hosts - in its host preload list so that it never needs to do an initial - load to get the HSTS header (note that this is not defined - in RFC 6797 and is therefore client implementation-dependent). - enum: - - RequirePreload - - RequireNoPreload - - NoOpinion - type: string - required: - - domainPatterns - type: object - type: array - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - componentRoutes: - description: componentRoutes is where participating operators place - the current route status for routes whose hostnames and serving - certificates can be customized by the cluster-admin. - items: - description: ComponentRouteStatus contains information allowing - configuration of a route's hostname and serving certificate. - properties: - conditions: - description: "conditions are used to communicate the state of - the componentRoutes entry. \n Supported conditions include - Available, Degraded and Progressing. \n If available is true, - the content served by the route can be accessed by users. - This includes cases where a default may continue to serve - content while the customized route specified by the cluster-admin - is being configured. \n If Degraded is true, that means something - has gone wrong trying to handle the componentRoutes entry. - The currentHostnames field may or may not be in effect. \n - If Progressing is true, that means the component is taking - some action related to the componentRoutes entry." - items: - description: "Condition contains details for one aspect of - the current state of this API Resource. --- This struct - is intended for direct use as an array at the field path - .status.conditions. For example, \n type FooStatus struct{ - // Represents the observations of a foo's current state. 
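
Assembling the fields above, one plausible requiredHSTSPolicies entry (the namespace label and age bounds are illustrative) is:

    requiredHSTSPolicies:
      - domainPatterns:
          - '*.foo.com'      # everything under foo.com
          - foo.com          # the apex must be listed separately, as noted above
        namespaceSelector:
          matchLabels:
            environment: production    # hypothetical label
        maxAge:
          smallestMaxAge: 1
          largestMaxAge: 31536000
        includeSubDomainsPolicy: RequireNoIncludeSubDomains
        preloadPolicy: NoOpinion
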
- // Known .status.conditions.type are: \"Available\", \"Progressing\", - and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields - }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should - be when the underlying condition changed. If that is - not known, then using the time when the API field changed - is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, - if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the - current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier - indicating the reason for the condition's last transition. - Producers of specific condition types may define expected - values and meanings for this field, and whether the - values are considered a guaranteed API. The value should - be a CamelCase string. This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across - resources like Available, but because arbitrary conditions - can be useful (see .node.status.conditions), the ability - to deconflict is important. The regex it matches is - (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - consumingUsers: - description: consumingUsers is a slice of ServiceAccounts that - need to have read permission on the servingCertKeyPairSecret - secret. - items: - description: ConsumingUser is an alias for string which we - add validation to. Currently only service accounts are supported. - maxLength: 512 - minLength: 1 - pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - maxItems: 5 - type: array - currentHostnames: - description: currentHostnames is the list of current names used - by the route. Typically, this list should consist of a single - hostname, but if multiple hostnames are supported by the route - the operator may write multiple entries to this list. - items: - description: "Hostname is an alias for hostname string validation. 
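
A single entry in the conditions list described above could look like this (the reason and message are invented for illustration):

    conditions:
      - type: Available
        status: "True"
        reason: RouteServing                 # CamelCase identifier, must be non-empty
        message: The customized route is serving traffic
        lastTransitionTime: "2024-03-15T00:00:00Z"
        observedGeneration: 12               # echoes the generation example in the description
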
- \n The left operand of the | is the original kubebuilder - hostname validation format, which is incorrect because it - allows upper case letters, disallows hyphen or number in - the TLD, and allows labels to start/end in non-alphanumeric - characters. See https://bugzilla.redhat.com/show_bug.cgi?id=2039256. - ^([a-zA-Z0-9\\p{S}\\p{L}]((-?[a-zA-Z0-9\\p{S}\\p{L}]{0,62})?)|([a-zA-Z0-9\\p{S}\\p{L}](([a-zA-Z0-9-\\p{S}\\p{L}]{0,61}[a-zA-Z0-9\\p{S}\\p{L}])?)(\\.)){1,}([a-zA-Z\\p{L}]){2,63})$ - \n The right operand of the | is a new pattern that mimics - the current API route admission validation on hostname, - except that it allows hostnames longer than the maximum - length: ^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ - \n Both operand patterns are made available so that modifications - on ingress spec can still happen after an invalid hostname - was saved via validation by the incorrect left operand of - the | operator." - pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ - type: string - minItems: 1 - type: array - defaultHostname: - description: defaultHostname is the hostname of this route prior - to customization. - pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ - type: string - name: - description: "name is the logical name of the route to customize. - It does not have to be the actual name of a route resource - but it cannot be renamed. \n The namespace and name of this - componentRoute must match a corresponding entry in the list - of spec.componentRoutes if the route is to be customized." - maxLength: 256 - minLength: 1 - type: string - namespace: - description: "namespace is the namespace of the route to customize. - It must be a real namespace. Using an actual namespace ensures - that no two components will conflict and the same component - can be installed multiple times. \n The namespace and name - of this componentRoute must match a corresponding entry in - the list of spec.componentRoutes if the route is to be customized." - maxLength: 63 - minLength: 1 - pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - type: string - relatedObjects: - description: relatedObjects is a list of resources which are - useful when debugging or inspecting how spec.componentRoutes - is applied. - items: - description: ObjectReference contains enough information to - let you inspect or modify the referred object. - properties: - group: - description: group of the referent. - type: string - name: - description: name of the referent. - type: string - namespace: - description: namespace of the referent. - type: string - resource: - description: resource of the referent. 
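
The remaining componentRouteStatus bookkeeping fields described above, sketched with hypothetical console values:

    consumingUsers:
      - system:serviceaccount:openshift-console:console   # must match the service-account pattern above
    relatedObjects:
      - group: route.openshift.io
        name: console
        namespace: openshift-console
        resource: routes
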
- type: string - required: - - group - - name - - resource - type: object - minItems: 1 - type: array - required: - - defaultHostname - - name - - namespace - - relatedObjects - type: object - type: array - x-kubernetes-list-map-keys: - - namespace - - name - x-kubernetes-list-type: map - defaultPlacement: - description: "defaultPlacement is set at installation time to control - which nodes will host the ingress router pods by default. The options - are control-plane nodes or worker nodes. \n This field works by - dictating how the Cluster Ingress Operator will consider unset replicas - and nodePlacement fields in IngressController resources when creating - the corresponding Deployments. \n See the documentation for the - IngressController replicas and nodePlacement fields for more information. - \n When omitted, the default value is Workers" - enum: - - ControlPlane - - Workers - - "" - type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-CustomNoUpgrade.crd.yaml deleted file mode 100644 index 5392f14c6..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,284 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: networks.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Network - listKind: NetworkList - plural: networks - singular: network - preserveUnknownFields: false - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Network holds cluster-wide information about Network. The canonical - name is `cluster`. It is used to configure the desired network configuration, - such as: IP address pools for services/pod IPs, network plugin, etc. Please - view network.spec for an explanation on what applies when configuring this - resource. \n Compatibility level 1: Stable within a major release for a - minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration. As a general - rule, this SHOULD NOT be read directly. Instead, you should consume - the NetworkStatus, as it indicates the currently deployed configuration. 
- Currently, most spec fields are immutable after installation. Please - view the individual ones for further details on each. - properties: - clusterNetwork: - description: IP address pool to use for pod IPs. This field is immutable - after installation. - items: - description: ClusterNetworkEntry is a contiguous block of IP addresses - from which pod IPs are allocated. - properties: - cidr: - description: The complete block for pod IPs. - type: string - hostPrefix: - description: The size (prefix) of block to allocate to each - node. If this field is not used by the plugin, it can be left - unset. - format: int32 - minimum: 0 - type: integer - type: object - type: array - externalIP: - description: externalIP defines configuration for controllers that - affect Service.ExternalIP. If nil, then ExternalIP is not allowed - to be set. - properties: - autoAssignCIDRs: - description: autoAssignCIDRs is a list of CIDRs from which to - automatically assign Service.ExternalIP. These are assigned - when the service is of type LoadBalancer. In general, this is - only useful for bare-metal clusters. In Openshift 3.x, this - was misleadingly called "IngressIPs". Automatically assigned - External IPs are not affected by any ExternalIPPolicy rules. - Currently, only one entry may be provided. - items: - type: string - type: array - policy: - description: policy is a set of restrictions applied to the ExternalIP - field. If nil or empty, then ExternalIP is not allowed to be - set. - properties: - allowedCIDRs: - description: allowedCIDRs is the list of allowed CIDRs. - items: - type: string - type: array - rejectedCIDRs: - description: rejectedCIDRs is the list of disallowed CIDRs. - These take precedence over allowedCIDRs. - items: - type: string - type: array - type: object - type: object - networkType: - description: 'NetworkType is the plugin that is to be deployed (e.g. - OpenShiftSDN). This should match a value that the cluster-network-operator - understands, or else no networking will be installed. Currently - supported values are: - OpenShiftSDN This field is immutable after - installation.' - type: string - serviceNetwork: - description: IP address pool for services. Currently, we only support - a single entry here. This field is immutable after installation. - items: - type: string - type: array - serviceNodePortRange: - description: The port range allowed for Services of type NodePort. - If not specified, the default of 30000-32767 will be used. Such - Services without a NodePort specified will have one automatically - allocated from this range. This parameter can be updated after the - cluster is installed. - pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ - type: string - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - clusterNetwork: - description: IP address pool to use for pod IPs. - items: - description: ClusterNetworkEntry is a contiguous block of IP addresses - from which pod IPs are allocated. - properties: - cidr: - description: The complete block for pod IPs. - type: string - hostPrefix: - description: The size (prefix) of block to allocate to each - node. If this field is not used by the plugin, it can be left - unset. 
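
Pulling the spec fields above together, a typical Network config might be (these CIDRs are the common OpenShift defaults, assumed here rather than taken from this patch):

    spec:
      clusterNetwork:
        - cidr: 10.128.0.0/14      # pod IP pool, immutable after installation
          hostPrefix: 23           # a /23 block per node
      serviceNetwork:
        - 172.30.0.0/16            # only a single entry is supported
      networkType: OpenShiftSDN
      serviceNodePortRange: 30000-32767   # the documented default range
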
- format: int32 - minimum: 0 - type: integer - type: object - type: array - clusterNetworkMTU: - description: ClusterNetworkMTU is the MTU for inter-pod networking. - type: integer - conditions: - description: 'conditions represents the observations of a network.config - current state. Known .status.conditions.type are: "NetworkTypeMigrationInProgress", - "NetworkTypeMigrationMTUReady", "NetworkTypeMigrationTargetCNIAvailable", - "NetworkTypeMigrationTargetCNIInUse" and "NetworkTypeMigrationOriginalCNIPurged"' - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - migration: - description: Migration contains the cluster network migration configuration. - properties: - mtu: - description: MTU contains the MTU migration configuration. 
- properties: - machine: - description: Machine contains MTU migration configuration - for the machine's uplink. - properties: - from: - description: From is the MTU to migrate from. - format: int32 - minimum: 0 - type: integer - to: - description: To is the MTU to migrate to. - format: int32 - minimum: 0 - type: integer - type: object - network: - description: Network contains MTU migration configuration - for the default network. - properties: - from: - description: From is the MTU to migrate from. - format: int32 - minimum: 0 - type: integer - to: - description: To is the MTU to migrate to. - format: int32 - minimum: 0 - type: integer - type: object - type: object - networkType: - description: 'NetworkType is the target plugin that is to be deployed. - Currently supported values are: OpenShiftSDN, OVNKubernetes' - enum: - - OpenShiftSDN - - OVNKubernetes - type: string - type: object - networkType: - description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). - type: string - serviceNetwork: - description: IP address pool for services. Currently, we only support - a single entry here. - items: - type: string - type: array - type: object - required: - - spec - type: object - served: true - storage: true diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-Default.crd.yaml deleted file mode 100644 index d71799f59..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-Default.crd.yaml +++ /dev/null @@ -1,284 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: Default - name: networks.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Network - listKind: NetworkList - plural: networks - singular: network - preserveUnknownFields: false - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Network holds cluster-wide information about Network. The canonical - name is `cluster`. It is used to configure the desired network configuration, - such as: IP address pools for services/pod IPs, network plugin, etc. Please - view network.spec for an explanation on what applies when configuring this - resource. \n Compatibility level 1: Stable within a major release for a - minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration. 
As a general - rule, this SHOULD NOT be read directly. Instead, you should consume - the NetworkStatus, as it indicates the currently deployed configuration. - Currently, most spec fields are immutable after installation. Please - view the individual ones for further details on each. - properties: - clusterNetwork: - description: IP address pool to use for pod IPs. This field is immutable - after installation. - items: - description: ClusterNetworkEntry is a contiguous block of IP addresses - from which pod IPs are allocated. - properties: - cidr: - description: The complete block for pod IPs. - type: string - hostPrefix: - description: The size (prefix) of block to allocate to each - node. If this field is not used by the plugin, it can be left - unset. - format: int32 - minimum: 0 - type: integer - type: object - type: array - externalIP: - description: externalIP defines configuration for controllers that - affect Service.ExternalIP. If nil, then ExternalIP is not allowed - to be set. - properties: - autoAssignCIDRs: - description: autoAssignCIDRs is a list of CIDRs from which to - automatically assign Service.ExternalIP. These are assigned - when the service is of type LoadBalancer. In general, this is - only useful for bare-metal clusters. In Openshift 3.x, this - was misleadingly called "IngressIPs". Automatically assigned - External IPs are not affected by any ExternalIPPolicy rules. - Currently, only one entry may be provided. - items: - type: string - type: array - policy: - description: policy is a set of restrictions applied to the ExternalIP - field. If nil or empty, then ExternalIP is not allowed to be - set. - properties: - allowedCIDRs: - description: allowedCIDRs is the list of allowed CIDRs. - items: - type: string - type: array - rejectedCIDRs: - description: rejectedCIDRs is the list of disallowed CIDRs. - These take precedence over allowedCIDRs. - items: - type: string - type: array - type: object - type: object - networkType: - description: 'NetworkType is the plugin that is to be deployed (e.g. - OpenShiftSDN). This should match a value that the cluster-network-operator - understands, or else no networking will be installed. Currently - supported values are: - OpenShiftSDN This field is immutable after - installation.' - type: string - serviceNetwork: - description: IP address pool for services. Currently, we only support - a single entry here. This field is immutable after installation. - items: - type: string - type: array - serviceNodePortRange: - description: The port range allowed for Services of type NodePort. - If not specified, the default of 30000-32767 will be used. Such - Services without a NodePort specified will have one automatically - allocated from this range. This parameter can be updated after the - cluster is installed. - pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ - type: string - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - clusterNetwork: - description: IP address pool to use for pod IPs. - items: - description: ClusterNetworkEntry is a contiguous block of IP addresses - from which pod IPs are allocated. - properties: - cidr: - description: The complete block for pod IPs. - type: string - hostPrefix: - description: The size (prefix) of block to allocate to each - node. If this field is not used by the plugin, it can be left - unset. 
- format: int32 - minimum: 0 - type: integer - type: object - type: array - clusterNetworkMTU: - description: ClusterNetworkMTU is the MTU for inter-pod networking. - type: integer - conditions: - description: 'conditions represents the observations of a network.config - current state. Known .status.conditions.type are: "NetworkTypeMigrationInProgress", - "NetworkTypeMigrationMTUReady", "NetworkTypeMigrationTargetCNIAvailable", - "NetworkTypeMigrationTargetCNIInUse" and "NetworkTypeMigrationOriginalCNIPurged"' - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - migration: - description: Migration contains the cluster network migration configuration. - properties: - mtu: - description: MTU contains the MTU migration configuration. 
- properties: - machine: - description: Machine contains MTU migration configuration - for the machine's uplink. - properties: - from: - description: From is the MTU to migrate from. - format: int32 - minimum: 0 - type: integer - to: - description: To is the MTU to migrate to. - format: int32 - minimum: 0 - type: integer - type: object - network: - description: Network contains MTU migration configuration - for the default network. - properties: - from: - description: From is the MTU to migrate from. - format: int32 - minimum: 0 - type: integer - to: - description: To is the MTU to migrate to. - format: int32 - minimum: 0 - type: integer - type: object - type: object - networkType: - description: 'NetworkType is the target plugin that is to be deployed. - Currently supported values are: OpenShiftSDN, OVNKubernetes' - enum: - - OpenShiftSDN - - OVNKubernetes - type: string - type: object - networkType: - description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). - type: string - serviceNetwork: - description: IP address pool for services. Currently, we only support - a single entry here. - items: - type: string - type: array - type: object - required: - - spec - type: object - served: true - storage: true diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index 8ec000b89..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,284 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: networks.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Network - listKind: NetworkList - plural: networks - singular: network - preserveUnknownFields: false - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Network holds cluster-wide information about Network. The canonical - name is `cluster`. It is used to configure the desired network configuration, - such as: IP address pools for services/pod IPs, network plugin, etc. Please - view network.spec for an explanation on what applies when configuring this - resource. \n Compatibility level 1: Stable within a major release for a - minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration. As a general - rule, this SHOULD NOT be read directly. Instead, you should consume - the NetworkStatus, as it indicates the currently deployed configuration. - Currently, most spec fields are immutable after installation. Please - view the individual ones for further details on each. - properties: - clusterNetwork: - description: IP address pool to use for pod IPs. This field is immutable - after installation. - items: - description: ClusterNetworkEntry is a contiguous block of IP addresses - from which pod IPs are allocated. - properties: - cidr: - description: The complete block for pod IPs. - type: string - hostPrefix: - description: The size (prefix) of block to allocate to each - node. If this field is not used by the plugin, it can be left - unset. - format: int32 - minimum: 0 - type: integer - type: object - type: array - externalIP: - description: externalIP defines configuration for controllers that - affect Service.ExternalIP. If nil, then ExternalIP is not allowed - to be set. - properties: - autoAssignCIDRs: - description: autoAssignCIDRs is a list of CIDRs from which to - automatically assign Service.ExternalIP. These are assigned - when the service is of type LoadBalancer. In general, this is - only useful for bare-metal clusters. In Openshift 3.x, this - was misleadingly called "IngressIPs". Automatically assigned - External IPs are not affected by any ExternalIPPolicy rules. - Currently, only one entry may be provided. - items: - type: string - type: array - policy: - description: policy is a set of restrictions applied to the ExternalIP - field. If nil or empty, then ExternalIP is not allowed to be - set. - properties: - allowedCIDRs: - description: allowedCIDRs is the list of allowed CIDRs. - items: - type: string - type: array - rejectedCIDRs: - description: rejectedCIDRs is the list of disallowed CIDRs. - These take precedence over allowedCIDRs. - items: - type: string - type: array - type: object - type: object - networkType: - description: 'NetworkType is the plugin that is to be deployed (e.g. - OpenShiftSDN). This should match a value that the cluster-network-operator - understands, or else no networking will be installed. Currently - supported values are: - OpenShiftSDN This field is immutable after - installation.' - type: string - serviceNetwork: - description: IP address pool for services. Currently, we only support - a single entry here. This field is immutable after installation. - items: - type: string - type: array - serviceNodePortRange: - description: The port range allowed for Services of type NodePort. - If not specified, the default of 30000-32767 will be used. Such - Services without a NodePort specified will have one automatically - allocated from this range. This parameter can be updated after the - cluster is installed. - pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ - type: string - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - clusterNetwork: - description: IP address pool to use for pod IPs. - items: - description: ClusterNetworkEntry is a contiguous block of IP addresses - from which pod IPs are allocated. 
- properties: - cidr: - description: The complete block for pod IPs. - type: string - hostPrefix: - description: The size (prefix) of block to allocate to each - node. If this field is not used by the plugin, it can be left - unset. - format: int32 - minimum: 0 - type: integer - type: object - type: array - clusterNetworkMTU: - description: ClusterNetworkMTU is the MTU for inter-pod networking. - type: integer - conditions: - description: 'conditions represents the observations of a network.config - current state. Known .status.conditions.type are: "NetworkTypeMigrationInProgress", - "NetworkTypeMigrationMTUReady", "NetworkTypeMigrationTargetCNIAvailable", - "NetworkTypeMigrationTargetCNIInUse" and "NetworkTypeMigrationOriginalCNIPurged"' - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - migration: - description: Migration contains the cluster network migration configuration. - properties: - mtu: - description: MTU contains the MTU migration configuration. - properties: - machine: - description: Machine contains MTU migration configuration - for the machine's uplink. - properties: - from: - description: From is the MTU to migrate from. - format: int32 - minimum: 0 - type: integer - to: - description: To is the MTU to migrate to. - format: int32 - minimum: 0 - type: integer - type: object - network: - description: Network contains MTU migration configuration - for the default network. - properties: - from: - description: From is the MTU to migrate from. - format: int32 - minimum: 0 - type: integer - to: - description: To is the MTU to migrate to. - format: int32 - minimum: 0 - type: integer - type: object - type: object - networkType: - description: 'NetworkType is the target plugin that is to be deployed. - Currently supported values are: OpenShiftSDN, OVNKubernetes' - enum: - - OpenShiftSDN - - OVNKubernetes - type: string - type: object - networkType: - description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). - type: string - serviceNetwork: - description: IP address pool for services. Currently, we only support - a single entry here. - items: - type: string - type: array - type: object - required: - - spec - type: object - served: true - storage: true diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_node.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_node.crd.yaml deleted file mode 100644 index ab135b221..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_node.crd.yaml +++ /dev/null @@ -1,66 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1107 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: nodes.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Node - listKind: NodeList - plural: nodes - singular: node - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Node holds cluster-wide information about node specific features. - \n Compatibility level 1: Stable within a major release for a minimum of - 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - cgroupMode: - description: CgroupMode determines the cgroups version on the node - enum: - - v1 - - v2 - - "" - type: string - workerLatencyProfile: - description: WorkerLatencyProfile determines how fast the kubelet - is updating the status and corresponding reaction of the cluster - enum: - - Default - - MediumUpdateAverageReaction - - LowUpdateSlowReaction - type: string - type: object - status: - description: status holds observed values. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml deleted file mode 100644 index bc588e098..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml +++ /dev/null @@ -1,698 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: oauths.config.openshift.io -spec: - group: config.openshift.io - names: - kind: OAuth - listKind: OAuthList - plural: oauths - singular: oauth - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "OAuth holds cluster-wide information about OAuth. The canonical - name is `cluster`. It is used to configure the integrated OAuth server. - This configuration is only honored when the top level Authentication config - has type set to IntegratedOAuth. \n Compatibility level 1: Stable within - a major release for a minimum of 12 months or 3 minor releases (whichever - is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - identityProviders: - description: identityProviders is an ordered list of ways for a user - to identify themselves. When this list is empty, no identities are - provisioned for users. - items: - description: IdentityProvider provides identities for users authenticating - using credentials - properties: - basicAuth: - description: basicAuth contains configuration options for the - BasicAuth IdP - properties: - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle.
It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - tlsClientCert: - description: tlsClientCert is an optional reference to a - secret by name that contains the PEM-encoded TLS client - certificate to present when connecting to the server. - The key "tls.crt" is used to locate the data. If specified - and the secret or expected key is not found, the identity - provider is not honored. If the specified certificate - data is not valid, the identity provider is not honored. - The namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - tlsClientKey: - description: tlsClientKey is an optional reference to a - secret by name that contains the PEM-encoded TLS private - key for the client certificate referenced in tlsClientCert. - The key "tls.key" is used to locate the data. If specified - and the secret or expected key is not found, the identity - provider is not honored. If the specified certificate - data is not valid, the identity provider is not honored. - The namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - url: - description: url is the remote URL to connect to - type: string - type: object - github: - description: github enables user authentication using GitHub - credentials - properties: - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. This can only be configured when hostname is set - to a non-empty value. The namespace for this config map - is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - clientID: - description: clientID is the oauth client ID - type: string - clientSecret: - description: clientSecret is a required reference to the - secret by name containing the oauth client secret. The - key "clientSecret" is used to locate the data. If the - secret or expected key is not found, the identity provider - is not honored. The namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - hostname: - description: hostname is the optional domain (e.g. "mycompany.com") - for use with a hosted instance of GitHub Enterprise. It - must match the GitHub Enterprise settings value configured - at /setup/settings#hostname. 
- type: string - organizations: - description: organizations optionally restricts which organizations - are allowed to log in - items: - type: string - type: array - teams: - description: teams optionally restricts which teams are - allowed to log in. Format is <org>/<team>. - items: - type: string - type: array - type: object - gitlab: - description: gitlab enables user authentication using GitLab - credentials - properties: - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - clientID: - description: clientID is the oauth client ID - type: string - clientSecret: - description: clientSecret is a required reference to the - secret by name containing the oauth client secret. The - key "clientSecret" is used to locate the data. If the - secret or expected key is not found, the identity provider - is not honored. The namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - url: - description: url is the oauth server base URL - type: string - type: object - google: - description: google enables user authentication using Google - credentials - properties: - clientID: - description: clientID is the oauth client ID - type: string - clientSecret: - description: clientSecret is a required reference to the - secret by name containing the oauth client secret. The - key "clientSecret" is used to locate the data. If the - secret or expected key is not found, the identity provider - is not honored. The namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - hostedDomain: - description: hostedDomain is the optional Google App domain - (e.g. "mycompany.com") to restrict logins to - type: string - type: object - htpasswd: - description: htpasswd enables user authentication using an HTPasswd - file to validate credentials - properties: - fileData: - description: fileData is a required reference to a secret - by name containing the data to use as the htpasswd file. - The key "htpasswd" is used to locate the data. If the - secret or expected key is not found, the identity provider - is not honored. If the specified htpasswd data is not - valid, the identity provider is not honored. The namespace - for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - keystone: - description: keystone enables user authentication using keystone - password credentials - properties: - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server.
The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - domainName: - description: domainName is required for keystone v3 - type: string - tlsClientCert: - description: tlsClientCert is an optional reference to a - secret by name that contains the PEM-encoded TLS client - certificate to present when connecting to the server. - The key "tls.crt" is used to locate the data. If specified - and the secret or expected key is not found, the identity - provider is not honored. If the specified certificate - data is not valid, the identity provider is not honored. - The namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - tlsClientKey: - description: tlsClientKey is an optional reference to a - secret by name that contains the PEM-encoded TLS private - key for the client certificate referenced in tlsClientCert. - The key "tls.key" is used to locate the data. If specified - and the secret or expected key is not found, the identity - provider is not honored. If the specified certificate - data is not valid, the identity provider is not honored. - The namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - url: - description: url is the remote URL to connect to - type: string - type: object - ldap: - description: ldap enables user authentication using LDAP credentials - properties: - attributes: - description: attributes maps LDAP attributes to identities - properties: - email: - description: email is the list of attributes whose values - should be used as the email address. Optional. If - unspecified, no email is set for the identity - items: - type: string - type: array - id: - description: id is the list of attributes whose values - should be used as the user ID. Required. First non-empty - attribute is used. At least one attribute is required. - If none of the listed attribute have a value, authentication - fails. LDAP standard identity attribute is "dn" - items: - type: string - type: array - name: - description: name is the list of attributes whose values - should be used as the display name. Optional. If unspecified, - no display name is set for the identity LDAP standard - display name attribute is "cn" - items: - type: string - type: array - preferredUsername: - description: preferredUsername is the list of attributes - whose values should be used as the preferred username. - LDAP standard login attribute is "uid" - items: - type: string - type: array - type: object - bindDN: - description: bindDN is an optional DN to bind with during - the search phase. - type: string - bindPassword: - description: bindPassword is an optional reference to a - secret by name containing a password to bind with during - the search phase. The key "bindPassword" is used to locate - the data. If specified and the secret or expected key - is not found, the identity provider is not honored. 
The - namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - insecure: - description: 'insecure, if true, indicates the connection - should not use TLS WARNING: Should not be set to `true` - with the URL scheme "ldaps://" as "ldaps://" URLs always - attempt to connect using TLS, even when `insecure` is - set to `true` When `true`, "ldap://" URLs connect insecurely. - When `false`, "ldap://" URLs are upgraded to a TLS connection - using StartTLS as specified in https://tools.ietf.org/html/rfc2830.' - type: boolean - url: - description: 'url is an RFC 2255 URL which specifies the - LDAP search parameters to use. The syntax of the URL is: - ldap://host:port/basedn?attribute?scope?filter' - type: string - type: object - mappingMethod: - description: mappingMethod determines how identities from this - provider are mapped to users. Defaults to "claim" - type: string - name: - description: 'name is used to qualify the identities returned - by this provider. - It MUST be unique and not shared by any - other identity provider used - It MUST be a valid path segment: - name cannot equal "." or ".." or contain "/" or "%" or ":" - Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName' - type: string - openID: - description: openID enables user authentication using OpenID - credentials - properties: - ca: - description: ca is an optional reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. The key "ca.crt" is used to locate - the data. If specified and the config map or expected - key is not found, the identity provider is not honored. - If the specified ca data is not valid, the identity provider - is not honored. If empty, the default system roots are - used. The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - claims: - description: claims mappings - properties: - email: - description: email is the list of claims whose values - should be used as the email address. Optional. If - unspecified, no email is set for the identity - items: - type: string - type: array - x-kubernetes-list-type: atomic - groups: - description: groups is the list of claims whose values - should be used to synchronize groups from the OIDC - provider to OpenShift for the user. If multiple claims - are specified, the first one with a non-empty value - is used.
- items: - description: OpenIDClaim represents a claim retrieved - from an OpenID provider's tokens or userInfo responses - minLength: 1 - type: string - type: array - x-kubernetes-list-type: atomic - name: - description: name is the list of claims whose values - should be used as the display name. Optional. If unspecified, - no display name is set for the identity - items: - type: string - type: array - x-kubernetes-list-type: atomic - preferredUsername: - description: preferredUsername is the list of claims - whose values should be used as the preferred username. - If unspecified, the preferred username is determined - from the value of the sub claim - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - clientID: - description: clientID is the oauth client ID - type: string - clientSecret: - description: clientSecret is a required reference to the - secret by name containing the oauth client secret. The - key "clientSecret" is used to locate the data. If the - secret or expected key is not found, the identity provider - is not honored. The namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - extraAuthorizeParameters: - additionalProperties: - type: string - description: extraAuthorizeParameters are any custom parameters - to add to the authorize request. - type: object - extraScopes: - description: extraScopes are any scopes to request in addition - to the standard "openid" scope. - items: - type: string - type: array - issuer: - description: issuer is the URL that the OpenID Provider - asserts as its Issuer Identifier. It must use the https - scheme with no query or fragment component. - type: string - type: object - requestHeader: - description: requestHeader enables user authentication using - request header credentials - properties: - ca: - description: ca is a required reference to a config map - by name containing the PEM-encoded CA bundle. It is used - as a trust anchor to validate the TLS certificate presented - by the remote server. Specifically, it allows verification - of incoming requests to prevent header spoofing. The key - "ca.crt" is used to locate the data. If the config map - or expected key is not found, the identity provider is - not honored. If the specified ca data is not valid, the - identity provider is not honored. The namespace for this - config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - challengeURL: - description: challengeURL is a URL to redirect unauthenticated - /authorize requests to Unauthenticated requests from OAuth - clients which expect WWW-Authenticate challenges will - be redirected here. ${url} is replaced with the current - URL, escaped to be safe in a query parameter https://www.example.com/sso-login?then=${url} - ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} - Required when challenge is set to true. - type: string - clientCommonNames: - description: clientCommonNames is an optional list of common - names to require a match from. If empty, any client certificate - validated against the clientCA bundle is considered authoritative. 
- items: - type: string - type: array - emailHeaders: - description: emailHeaders is the set of headers to check - for the email address - items: - type: string - type: array - headers: - description: headers is the set of headers to check for - identity information - items: - type: string - type: array - loginURL: - description: loginURL is a URL to redirect unauthenticated - /authorize requests to Unauthenticated requests from OAuth - clients which expect interactive logins will be redirected - here ${url} is replaced with the current URL, escaped - to be safe in a query parameter https://www.example.com/sso-login?then=${url} - ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} - Required when login is set to true. - type: string - nameHeaders: - description: nameHeaders is the set of headers to check - for the display name - items: - type: string - type: array - preferredUsernameHeaders: - description: preferredUsernameHeaders is the set of headers - to check for the preferred username - items: - type: string - type: array - type: object - type: - description: type identifies the identity provider type for - this entry. - type: string - type: object - type: array - x-kubernetes-list-type: atomic - templates: - description: templates allow you to customize pages like the login - page. - properties: - error: - description: error is the name of a secret that specifies a go - template to use to render error pages during the authentication - or grant flow. The key "errors.html" is used to locate the template - data. If specified and the secret or expected key is not found, - the default error page is used. If the specified template is - not valid, the default error page is used. If unspecified, the - default error page is used. The namespace for this secret is - openshift-config. - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - login: - description: login is the name of a secret that specifies a go - template to use to render the login page. The key "login.html" - is used to locate the template data. If specified and the secret - or expected key is not found, the default login page is used. - If the specified template is not valid, the default login page - is used. If unspecified, the default login page is used. The - namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - providerSelection: - description: providerSelection is the name of a secret that specifies - a go template to use to render the provider selection page. - The key "providers.html" is used to locate the template data. - If specified and the secret or expected key is not found, the - default provider selection page is used. If the specified template - is not valid, the default provider selection page is used. If - unspecified, the default provider selection page is used. The - namespace for this secret is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - type: object - tokenConfig: - description: tokenConfig contains options for authorization and access - tokens - properties: - accessTokenInactivityTimeout: - description: "accessTokenInactivityTimeout defines the token inactivity - timeout for tokens granted by any client. 
The value represents - the maximum amount of time that can occur between consecutive - uses of the token. Tokens become invalid if they are not used - within this temporal window. The user will need to acquire a - new token to regain access once a token times out. Takes valid - time duration string such as \"5m\", \"1.5h\" or \"2h45m\". - The minimum allowed value for duration is 300s (5 minutes). - If the timeout is configured per client, then that value takes - precedence. If the timeout value is not specified and the client - does not override the value, then tokens are valid until their - lifetime. \n WARNING: existing tokens' timeout will not be affected - (lowered) by changing this value" - type: string - accessTokenInactivityTimeoutSeconds: - description: 'accessTokenInactivityTimeoutSeconds - DEPRECATED: - setting this field has no effect.' - format: int32 - type: integer - accessTokenMaxAgeSeconds: - description: accessTokenMaxAgeSeconds defines the maximum age - of access tokens - format: int32 - type: integer - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml deleted file mode 100644 index ec2c7af3f..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml +++ /dev/null @@ -1,68 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: projects.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Project - listKind: ProjectList - plural: projects - singular: project - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Project holds cluster-wide information about Project. The canonical - name is `cluster` \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - projectRequestMessage: - description: projectRequestMessage is the string presented to a user - if they are unable to request a project via the projectrequest api - endpoint - type: string - projectRequestTemplate: - description: projectRequestTemplate is the template to use for creating - projects in response to projectrequest. This must point to a template - in 'openshift-config' namespace. It is optional. If it is not specified, - a default template is used. - properties: - name: - description: name is the metadata.name of the referenced project - request template - type: string - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-CustomNoUpgrade.crd.yaml deleted file mode 100644 index f7a427662..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,130 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: schedulers.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Scheduler - listKind: SchedulerList - plural: schedulers - singular: scheduler - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Scheduler holds cluster-wide config information to run the Kubernetes - Scheduler and influence its placement decisions. The canonical name for - this config is `cluster`. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - defaultNodeSelector: - description: 'defaultNodeSelector helps set the cluster-wide default - node selector to restrict pod placement to specific nodes. 
This - is applied to the pods created in all namespaces and creates an - intersection with any existing nodeSelectors already set on a pod, - additionally constraining that pod''s selector. For example, defaultNodeSelector: - "type=user-node,region=east" would set nodeSelector field in pod - spec to "type=user-node,region=east" to all pods created in all - namespaces. Namespaces having project-wide node selectors won''t - be impacted even if this field is set. This adds an annotation section - to the namespace. For example, if a new namespace is created with - node-selector=''type=user-node,region=east'', the annotation openshift.io/node-selector: - type=user-node,region=east gets added to the project. When the openshift.io/node-selector - annotation is set on the project the value is used in preference - to the value we are setting for defaultNodeSelector field. For instance, - openshift.io/node-selector: "type=user-node,region=west" means that - the default of "type=user-node,region=east" set in defaultNodeSelector - would not be applied.' - type: string - mastersSchedulable: - description: 'MastersSchedulable allows master nodes to be schedulable. - When this flag is turned on, all the master nodes in the cluster - will be made schedulable, so that workload pods can run on them. - The default value for this field is false, meaning none of the master - nodes are schedulable. Important Note: Once the workload pods start - running on the master nodes, extreme care must be taken to ensure - that cluster-critical control plane components are not impacted. - Please turn on this field after doing due diligence.' - type: boolean - policy: - description: 'DEPRECATED: the scheduler Policy API has been deprecated - and will be removed in a future release. policy is a reference to - a ConfigMap containing scheduler policy which has user specified - predicates and priorities. If this ConfigMap is not available scheduler - will default to use DefaultAlgorithmProvider. The namespace for - this configmap is openshift-config.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - profile: - description: "profile sets which scheduling profile should be set - in order to configure scheduling decisions for new pods. \n Valid - values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" - Defaults to \"LowNodeUtilization\"" - enum: - - "" - - LowNodeUtilization - - HighNodeUtilization - - NoScoring - type: string - profileCustomizations: - description: profileCustomizations contains configuration for modifying - the default behavior of existing scheduler profiles. - properties: - dynamicResourceAllocation: - description: dynamicResourceAllocation allows enabling or disabling - dynamic resource allocation within the scheduler. Dynamic resource - allocation is an API for requesting and sharing resources between - pods and containers inside a pod. Third-party resource drivers - are responsible for tracking and allocating resources. Different - kinds of resources support arbitrary parameters for defining - requirements and initialization. Valid values are Enabled, Disabled - and omitted. When omitted, this means no opinion and the platform - is left to choose a reasonable default, which is subject to - change over time. The current default is Disabled. - enum: - - "" - - Enabled - - Disabled - type: string - type: object - type: object - status: - description: status holds observed values from the cluster.
They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-Default.crd.yaml deleted file mode 100644 index aa89c10a4..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-Default.crd.yaml +++ /dev/null @@ -1,109 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: Default - name: schedulers.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Scheduler - listKind: SchedulerList - plural: schedulers - singular: scheduler - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Scheduler holds cluster-wide config information to run the Kubernetes - Scheduler and influence its placement decisions. The canonical name for - this config is `cluster`. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - defaultNodeSelector: - description: 'defaultNodeSelector helps set the cluster-wide default - node selector to restrict pod placement to specific nodes. This - is applied to the pods created in all namespaces and creates an - intersection with any existing nodeSelectors already set on a pod, - additionally constraining that pod''s selector. For example, defaultNodeSelector: - "type=user-node,region=east" would set nodeSelector field in pod - spec to "type=user-node,region=east" to all pods created in all - namespaces. Namespaces having project-wide node selectors won''t - be impacted even if this field is set. This adds an annotation section - to the namespace. For example, if a new namespace is created with - node-selector=''type=user-node,region=east'', the annotation openshift.io/node-selector: - type=user-node,region=east gets added to the project. When the openshift.io/node-selector - annotation is set on the project the value is used in preference - to the value we are setting for defaultNodeSelector field. For instance, - openshift.io/node-selector: "type=user-node,region=west" means that - the default of "type=user-node,region=east" set in defaultNodeSelector - would not be applied.' 
- type: string - mastersSchedulable: - description: 'MastersSchedulable allows master nodes to be schedulable. - When this flag is turned on, all the master nodes in the cluster - will be made schedulable, so that workload pods can run on them. - The default value for this field is false, meaning none of the master - nodes are schedulable. Important Note: Once the workload pods start - running on the master nodes, extreme care must be taken to ensure - that cluster-critical control plane components are not impacted. - Please turn on this field after doing due diligence.' - type: boolean - policy: - description: 'DEPRECATED: the scheduler Policy API has been deprecated - and will be removed in a future release. policy is a reference to - a ConfigMap containing scheduler policy which has user specified - predicates and priorities. If this ConfigMap is not available scheduler - will default to use DefaultAlgorithmProvider. The namespace for - this configmap is openshift-config.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - profile: - description: "profile sets which scheduling profile should be set - in order to configure scheduling decisions for new pods. \n Valid - values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" - Defaults to \"LowNodeUtilization\"" - enum: - - "" - - LowNodeUtilization - - HighNodeUtilization - - NoScoring - type: string - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index 071b9dff9..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,130 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: schedulers.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Scheduler - listKind: SchedulerList - plural: schedulers - singular: scheduler - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Scheduler holds cluster-wide config information to run the Kubernetes - Scheduler and influence its placement decisions. The canonical name for - this config is `cluster`. \n Compatibility level 1: Stable within a major - release for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - defaultNodeSelector: - description: 'defaultNodeSelector helps set the cluster-wide default - node selector to restrict pod placement to specific nodes. This - is applied to the pods created in all namespaces and creates an - intersection with any existing nodeSelectors already set on a pod, - additionally constraining that pod''s selector. For example, defaultNodeSelector: - "type=user-node,region=east" would set nodeSelector field in pod - spec to "type=user-node,region=east" to all pods created in all - namespaces. Namespaces having project-wide node selectors won''t - be impacted even if this field is set. This adds an annotation section - to the namespace. For example, if a new namespace is created with - node-selector=''type=user-node,region=east'', the annotation openshift.io/node-selector: - type=user-node,region=east gets added to the project. When the openshift.io/node-selector - annotation is set on the project the value is used in preference - to the value we are setting for defaultNodeSelector field. For instance, - openshift.io/node-selector: "type=user-node,region=west" means that - the default of "type=user-node,region=east" set in defaultNodeSelector - would not be applied.' - type: string - mastersSchedulable: - description: 'MastersSchedulable allows master nodes to be schedulable. - When this flag is turned on, all the master nodes in the cluster - will be made schedulable, so that workload pods can run on them. - The default value for this field is false, meaning none of the master - nodes are schedulable. Important Note: Once the workload pods start - running on the master nodes, extreme care must be taken to ensure - that cluster-critical control plane components are not impacted. - Please turn on this field after doing due diligence.' - type: boolean - policy: - description: 'DEPRECATED: the scheduler Policy API has been deprecated - and will be removed in a future release. policy is a reference to - a ConfigMap containing scheduler policy which has user specified - predicates and priorities. If this ConfigMap is not available scheduler - will default to use DefaultAlgorithmProvider. The namespace for - this configmap is openshift-config.' - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - profile: - description: "profile sets which scheduling profile should be set - in order to configure scheduling decisions for new pods. \n Valid - values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" - Defaults to \"LowNodeUtilization\"" - enum: - - "" - - LowNodeUtilization - - HighNodeUtilization - - NoScoring - type: string - profileCustomizations: - description: profileCustomizations contains configuration for modifying - the default behavior of existing scheduler profiles.
- properties: - dynamicResourceAllocation: - description: dynamicResourceAllocation allows enabling or disabling - dynamic resource allocation within the scheduler. Dynamic resource - allocation is an API for requesting and sharing resources between - pods and containers inside a pod. Third-party resource drivers - are responsible for tracking and allocating resources. Different - kinds of resources support arbitrary parameters for defining - requirements and initialization. Valid values are Enabled, Disabled - and omitted. When omitted, this means no opinion and the platform - is left to choose a reasonable default, which is subject to - change over time. The current default is Disabled. - enum: - - "" - - Enabled - - Disabled - type: string - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_openshift-controller-manager-operator_01_build.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_openshift-controller-manager-operator_01_build.crd.yaml deleted file mode 100644 index 94e7f015a..000000000 --- a/vendor/github.com/openshift/api/config/v1/0000_10_openshift-controller-manager-operator_01_build.crd.yaml +++ /dev/null @@ -1,431 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - capability.openshift.io/name: Build - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: builds.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Build - listKind: BuildList - plural: builds - singular: build - preserveUnknownFields: false - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Build configures the behavior of OpenShift builds for the entire - cluster. This includes default settings that can be overridden in BuildConfig - objects, and overrides which are applied to all builds. \n The canonical - name is \"cluster\" \n Compatibility level 1: Stable within a major release - for a minimum of 12 months or 3 minor releases (whichever is longer)." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec holds user-settable values for the build controller - configuration - properties: - additionalTrustedCA: - description: "AdditionalTrustedCA is a reference to a ConfigMap containing - additional CAs that should be trusted for image pushes and pulls - during builds. The namespace for this config map is openshift-config.
- \n DEPRECATED: Additional CAs for image pull and push should be - set on image.config.openshift.io/cluster instead." - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - buildDefaults: - description: BuildDefaults controls the default information for Builds - properties: - defaultProxy: - description: "DefaultProxy contains the default proxy settings - for all build operations, including image pull/push and source - download. \n Values can be overridden by setting the `HTTP_PROXY`, - `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build - config's strategy." - properties: - httpProxy: - description: httpProxy is the URL of the proxy for HTTP requests. Empty - means unset and will not result in an env var. - type: string - httpsProxy: - description: httpsProxy is the URL of the proxy for HTTPS - requests. Empty means unset and will not result in an env - var. - type: string - noProxy: - description: noProxy is a comma-separated list of hostnames - and/or CIDRs and/or IPs for which the proxy should not be - used. Empty means unset and will not result in an env var. - type: string - readinessEndpoints: - description: readinessEndpoints is a list of endpoints used - to verify readiness of the proxy. - items: - type: string - type: array - trustedCA: - description: "trustedCA is a reference to a ConfigMap containing - a CA certificate bundle. The trustedCA field should only - be consumed by a proxy validator. The validator is responsible - for reading the certificate bundle from the required key - \"ca-bundle.crt\", merging it with the system default trust - bundle, and writing the merged trust bundle to a ConfigMap - named \"trusted-ca-bundle\" in the \"openshift-config-managed\" - namespace. Clients that expect to make proxy connections - must use the trusted-ca-bundle for all HTTPS requests to - the proxy, and may use the trusted-ca-bundle for non-proxy - HTTPS requests as well. \n The namespace for the ConfigMap - referenced by trustedCA is \"openshift-config\". Here is - an example ConfigMap (in yaml): \n apiVersion: v1 kind: - ConfigMap metadata: name: user-ca-bundle namespace: openshift-config - data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom - CA certificate bundle. -----END CERTIFICATE-----" - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - type: object - env: - description: Env is a set of default environment variables that - will be applied to the build if the specified variables do not - exist on the build - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. - type: string - value: - description: 'Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in - the container and any service environment variables. If - a variable cannot be resolved, the reference in the input - string will be unchanged. Double $$ are reduced to a single - $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless - of whether the variable exists or not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty.
- properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, spec.nodeName, - spec.serviceAccountName, status.hostIP, status.podIP, - status.podIPs.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of the - exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - gitProxy: - description: "GitProxy contains the proxy settings for git operations - only. If set, this will override any Proxy settings for all - git commands, such as git clone. \n Values that are not set - here will be inherited from DefaultProxy." - properties: - httpProxy: - description: httpProxy is the URL of the proxy for HTTP requests. Empty - means unset and will not result in an env var. - type: string - httpsProxy: - description: httpsProxy is the URL of the proxy for HTTPS - requests. Empty means unset and will not result in an env - var. - type: string - noProxy: - description: noProxy is a comma-separated list of hostnames - and/or CIDRs and/or IPs for which the proxy should not be - used. Empty means unset and will not result in an env var. - type: string - readinessEndpoints: - description: readinessEndpoints is a list of endpoints used - to verify readiness of the proxy. 
- items: - type: string - type: array - trustedCA: - description: "trustedCA is a reference to a ConfigMap containing - a CA certificate bundle. The trustedCA field should only - be consumed by a proxy validator. The validator is responsible - for reading the certificate bundle from the required key - \"ca-bundle.crt\", merging it with the system default trust - bundle, and writing the merged trust bundle to a ConfigMap - named \"trusted-ca-bundle\" in the \"openshift-config-managed\" - namespace. Clients that expect to make proxy connections - must use the trusted-ca-bundle for all HTTPS requests to - the proxy, and may use the trusted-ca-bundle for non-proxy - HTTPS requests as well. \n The namespace for the ConfigMap - referenced by trustedCA is \"openshift-config\". Here is - an example ConfigMap (in yaml): \n apiVersion: v1 kind: - ConfigMap metadata: name: user-ca-bundle namespace: openshift-config - data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom - CA certificate bundle. -----END CERTIFICATE-----" - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - type: object - imageLabels: - description: ImageLabels is a list of docker labels that are applied - to the resulting image. User can override a default label by - providing a label with the same name in their Build/BuildConfig. - items: - properties: - name: - description: Name defines the name of the label. It must - have non-zero length. - type: string - value: - description: Value defines the literal value of the label. - type: string - type: object - type: array - resources: - description: Resources defines resource requirements to execute - the build. - properties: - claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only be - set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in - pod.spec.resourceClaims of the Pod where this field - is used. It makes that resource available inside a - container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. Requests cannot exceed - Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - type: object - buildOverrides: - description: BuildOverrides controls override settings for builds - properties: - forcePull: - description: ForcePull overrides, if set, the equivalent value - in the builds, i.e. false disables force pull for all builds, - true enables force pull for all builds, independently of what - each build specifies itself - type: boolean - imageLabels: - description: ImageLabels is a list of docker labels that are applied - to the resulting image. If user provided a label in their Build/BuildConfig - with the same name as one in this list, the user's label will - be overwritten. - items: - properties: - name: - description: Name defines the name of the label. It must - have non-zero length. - type: string - value: - description: Value defines the literal value of the label. - type: string - type: object - type: array - nodeSelector: - additionalProperties: - type: string - description: NodeSelector is a selector which must be true for - the build pod to fit on a node - type: object - tolerations: - description: Tolerations is a list of Tolerations that will override - any existing tolerations set on a build pod. - items: - description: The pod this Toleration is attached to tolerates - any taint that matches the triple using - the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. - Empty means match all taint effects. When specified, allowed - values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match - all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to - the value. Valid operators are Exists and Equal. Defaults - to Equal. Exists is equivalent to wildcard for value, - so that a pod can tolerate all taints of a particular - category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of - time the toleration (which must be of effect NoExecute, - otherwise this field is ignored) tolerates the taint. - By default, it is not set, which means tolerate the taint - forever (do not evict). Zero and negative values will - be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. 
- type: string - type: object - type: array - type: object - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/custom.apiserver.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.apiserver.testsuite.yaml deleted file mode 100644 index 5e2dea3ea..000000000 --- a/vendor/github.com/openshift/api/config/v1/custom.apiserver.testsuite.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[CustomNoUpgrade] APIServer" -crd: 0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create encrypt with aescbc - initial: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - encryption: - type: aescbc - expected: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - audit: - profile: Default - encryption: - type: aescbc - - name: Should be able to create encrypt with aesgcm - initial: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - encryption: - type: aesgcm - expected: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - audit: - profile: Default - encryption: - type: aesgcm diff --git a/vendor/github.com/openshift/api/config/v1/custom.authentication.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.authentication.testsuite.yaml deleted file mode 100644 index 92e7d72e6..000000000 --- a/vendor/github.com/openshift/api/config/v1/custom.authentication.testsuite.yaml +++ /dev/null @@ -1,284 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[CustomNoUpgrade] Authentication" -crd: 0000_10_config-operator_01_authentication.crd-CustomNoUpgrade.yaml -tests: - onCreate: - - name: Should be able to create a minimal Authentication - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: {} # No spec is required for a Authentication - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: {} - - name: Should be able to use the OIDC type - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - - name: Cannot set username claim prefix with policy NoPrefix - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: NoPrefix - prefix: - prefixString: "myoidc:" - expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" - - name: Can set username claim prefix with policy Prefix - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: Prefix - prefix: - prefixString: "myoidc:" - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: Prefix - prefix: - prefixString: 
"myoidc:" - - name: Cannot leave username claim prefix blank with policy Prefix - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: Prefix - expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" - - name: Can set OIDC providers with no username prefixing - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: NoPrefix - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: NoPrefix - onUpdate: - - name: Updating OIDC provider with a client that's not in the status - initial: &initConfig | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: someclient - - componentNamespace: namespace - componentName: name - clientID: legitclient - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - updated: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: someclient - - componentNamespace: namespace - componentName: name - clientID: legitclient - - componentNamespace: dif-namespace # new client here - componentName: tehName - clientID: cool-client - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expectedError: "all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" - - name: Updating OIDC provider with a client that's different from the previous one - initial: *initConfig - updated: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: dif-namespace - componentName: tehName - clientID: cool-client - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expectedError: "all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" - - name: Updating previously existing client - initial: *initConfig - updated: &prevExistingUpdated | - 
apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: different-client - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expected: *prevExistingUpdated - - name: Removing a configured client from the status (== component unregister) - initial: *initConfig - updated: &removeFromStatus | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: different-client - - componentNamespace: namespace - componentName: name - clientID: legitclient - status: - oidcClients: - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expected: *removeFromStatus - - name: Simply add a valid client - initial: *initConfig - updated: &addClient | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: different-client - - componentNamespace: namespace - componentName: name - clientID: legitclient - - componentNamespace: namespace2 - componentName: name3 - clientID: justavalidclient - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expected: *addClient diff --git a/vendor/github.com/openshift/api/config/v1/custom.clusterversion.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.clusterversion.testsuite.yaml deleted file mode 100644 index f3090558b..000000000 --- a/vendor/github.com/openshift/api/config/v1/custom.clusterversion.testsuite.yaml +++ /dev/null @@ -1,472 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[CustomNoUpgrade] ClusterVersion" -crd: 0000_00_cluster-version-operator_01_clusterversion-CustomNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ClusterVersion - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - - name: Should allow image to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - image: bar - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - image: bar - - name: Should allow version to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - version: 4.11.1 - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - version: 4.11.1 - - name: Should allow architecture to be empty - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - 
desiredUpdate: - architecture: "" - version: 4.11.1 - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: "" - version: 4.11.1 - - name: Should allow architecture and version to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - - name: Version must be set if architecture is set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - expectedError: "Version must be set if Architecture is set" - - name: Should not allow image and architecture to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - image: bar - expectedError: "cannot set both Architecture and Image" - - name: Should be able to create a ClusterVersion with base capability None, and additional capabilities baremetal and MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - - name: Should not be able to create a ClusterVersion with base capability None, and additional capabilities baremetal without MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - expectedError: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability - - name: Should be able to create a ClusterVersion with base capability None, and additional capabilities marketplace and OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - - name: Should not be able to create a ClusterVersion with base capability None, and additional capabilities marketplace without OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - expectedError: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability - - name: Should be able to set a custom signature store - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: - - url: "https://osus.ocp.com" - 
expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: - - url: "https://osus.ocp.com" - - name: Should be able to set multiple custom signature stores - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: - - url: "https://osus1.ocp.com" - - url: "https://osus2.ocp.com" - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: - - url: "https://osus1.ocp.com" - - url: "https://osus2.ocp.com" - - name: Invalid custom signature store should throw error - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: - - url: "osus1.ocp.com" - expectedError: "url must be a valid absolute URL" - - name: Should be able to unset the signature stores - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: [] - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: [] - onUpdate: - - name: Should not allow image to be set if architecture set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - image: bar - expectedError: "cannot set both Architecture and Image" - - name: Should not allow architecture to be set if image set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - image: bar - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - image: bar - expectedError: "cannot set both Architecture and Image" - - name: Should be able to add the baremetal capability with a ClusterVersion with base capability None, and implicitly enabled MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - MachineAPI - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - MachineAPI - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - MachineAPI - - name: Should be able to add the baremetal capability with a ClusterVersion with base capability None, with the Machine API capability - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - updated: |
apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - - name: Should not be able to add the baremetal capability with a ClusterVersion with base capability None, and without MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - expectedError: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability - - name: Should be able to add the marketplace capability with a ClusterVersion with base capability None, and implicitly enabled OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - OperatorLifecycleManager - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - OperatorLifecycleManager - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - OperatorLifecycleManager - - name: Should be able to add the marketplace capability with a ClusterVersion with base capability None, with the OperatorLifecycleManager capability - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - - name: Should not be able to add the marketplace capability with a ClusterVersion with base capability None, and without OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - 
additionalEnabledCapabilities: - - marketplace - expectedError: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability diff --git a/vendor/github.com/openshift/api/config/v1/custom.dns.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.dns.testsuite.yaml deleted file mode 100644 index ab1a123b6..000000000 --- a/vendor/github.com/openshift/api/config/v1/custom.dns.testsuite.yaml +++ /dev/null @@ -1,104 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Custom] DNS" -crd: 0000_10_config-operator_01_dns-CustomNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal DNS - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: {} # No spec is required for a DNS - expected: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: {} - - name: Should be able to specify an AWS role ARN for a private hosted zone - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: AWS - aws: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - expected: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: AWS - aws: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - - name: Should not be able to specify unsupported platform - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: Azure - azure: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - expectedError: "Invalid value: \"string\": allowed values are '' and 'AWS'" - - name: Should not be able to specify invalid AWS role ARN - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - metadata: - name: cluster - spec: - platform: - type: AWS - aws: - privateZoneIAMRole: arn:aws:iam:bad:123456789012:role/foo - expectedError: "DNS.config.openshift.io \"cluster\" is invalid: spec.platform.aws.privateZoneIAMRole: Invalid value: \"arn:aws:iam:bad:123456789012:role/foo\": spec.platform.aws.privateZoneIAMRole in body should match '^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\\/.*$'" - - name: Should not be able to specify different type and platform - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: "" - aws: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - expectedError: "Invalid value: \"object\": aws configuration is required when platform is AWS, and forbidden otherwise" - onUpdate: - - name: Can switch from empty (default), to AWS - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: "" - updated: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: AWS - aws: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - expected: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: AWS - aws: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - - name: Upgrade case is valid - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: {} # No spec is required for a DNS - updated: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: "" - expected: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: "" diff --git a/vendor/github.com/openshift/api/config/v1/custom.infrastructure.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.infrastructure.testsuite.yaml 
deleted file mode 100644 index 24433f4f7..000000000 --- a/vendor/github.com/openshift/api/config/v1/custom.infrastructure.testsuite.yaml +++ /dev/null @@ -1,321 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Custom] Infrastructure" -crd: 0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Infrastructure - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} # No spec is required for a Infrastructure - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - onUpdate: - - name: Should not be able to modify an existing GCP ResourceLabels Label - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "changed"} - expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation" - - name: Should not be able to add a Label to an existing GCP ResourceLabels - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - - {key: "new", value: "entry"} - expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation" - - name: Should not be able to remove a Label from an existing GCP ResourceLabels - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - - {key: "new", value: "entry"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation" - - name: Should not be able to add GCP ResourceLabels to an empty platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - gcp: - resourceLabels: - - {key: "key", value: "value"} - expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceLabels may only be configured during installation" - - name: Should not be able to remove GCP ResourceLabels from platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: 
Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: {} - expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceLabels may only be configured during installation" - - name: Should not have label key start with openshift-io for GCP ResourceLabels in platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - - {key: "openshift-io-created-cluster", value: "true"} - expectedStatusError: "status.platformStatus.gcp.resourceLabels[1].key: Invalid value: \"string\": label keys must not start with either `openshift-io` or `kubernetes-io`" - - name: Should not have label key start with kubernetes-io for GCP ResourceLabels in platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - - {key: "kubernetes-io-created-cluster", value: "true"} - expectedStatusError: "status.platformStatus.gcp.resourceLabels[1].key: Invalid value: \"string\": label keys must not start with either `openshift-io` or `kubernetes-io`" - - name: Should not be able to modify an existing GCP ResourceTags Tag - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "changed"} - expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" - - name: Should not be able to add a Tag to an existing GCP ResourceTags - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - - {parentID: "test-project-123", key: "new", value: "tag"} - expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" - - name: Should not be able to remove a Tag from an existing GCP ResourceTags - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", 
key: "key1", value: "value1"} - - {parentID: "test-project-123", key: "key2", value: "value2"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key1", value: "value1"} - expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" - - name: Should not be able to add GCP ResourceTags to an empty platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceTags may only be configured during installation" - - name: Should not be able to remove GCP ResourceTags from platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: {} - expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceTags may only be configured during installation" - - name: Should not be able to modify ParentID of a Tag in the GCP ResourceTags - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "test-project-123", key: "key", value: "value"} - expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" diff --git a/vendor/github.com/openshift/api/config/v1/custom.network.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.network.testsuite.yaml deleted file mode 100644 index 59e9fbdff..000000000 --- a/vendor/github.com/openshift/api/config/v1/custom.network.testsuite.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[CustomNoUpgrade] Network" -crd: 0000_10_config-operator_01_network-CustomNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to set status conditions - initial: | - apiVersion: config.openshift.io/v1 - kind: Network - spec: {} # No spec is required for a Network - status: - conditions: - - type: NetworkTypeMigrationInProgress - status: "False" - reason: "Reason" - message: "Message" - lastTransitionTime: "2023-10-25T12:00:00Z" - expected: | - apiVersion: config.openshift.io/v1 - kind: Network - spec: {} - status: - conditions: - - type: NetworkTypeMigrationInProgress - status: "False" - reason: "Reason" - message: "Message" - lastTransitionTime: "2023-10-25T12:00:00Z" diff --git 
a/vendor/github.com/openshift/api/config/v1/custom.scheduler.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.scheduler.testsuite.yaml deleted file mode 100644 index 57b546b63..000000000 --- a/vendor/github.com/openshift/api/config/v1/custom.scheduler.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] Scheduler" -crd: 0000_10_config-operator_01_scheduler-CustomNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Scheduler - initial: | - apiVersion: config.openshift.io/v1 - kind: Scheduler - spec: {} # No spec is required for a Scheduler - expected: | - apiVersion: config.openshift.io/v1 - kind: Scheduler - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/doc.go b/vendor/github.com/openshift/api/config/v1/doc.go index 4ff5208f2..f99454758 100644 --- a/vendor/github.com/openshift/api/config/v1/doc.go +++ b/vendor/github.com/openshift/api/config/v1/doc.go @@ -1,6 +1,7 @@ // +k8s:deepcopy-gen=package,register // +k8s:defaulter-gen=TypeMeta // +k8s:openapi-gen=true +// +openshift:featuregated-schema-gen=true // +kubebuilder:validation:Optional // +groupName=config.openshift.io diff --git a/vendor/github.com/openshift/api/config/v1/feature_gates.go b/vendor/github.com/openshift/api/config/v1/feature_gates.go index 7b7cbf640..7a9d129cf 100644 --- a/vendor/github.com/openshift/api/config/v1/feature_gates.go +++ b/vendor/github.com/openshift/api/config/v1/feature_gates.go @@ -1,5 +1,7 @@ package v1 +import "fmt" + // FeatureGateDescription is a golang-only interface used to contains details for a feature gate. type FeatureGateDescription struct { // FeatureGateAttributes is the information that appears in the API @@ -15,6 +17,14 @@ type FeatureGateDescription struct { OwningProduct OwningProduct } +type ClusterProfileName string + +var ( + Hypershift = ClusterProfileName("include.release.openshift.io/ibm-cloud-managed") + SelfManaged = ClusterProfileName("include.release.openshift.io/self-managed-high-availability") + AllClusterProfiles = []ClusterProfileName{Hypershift, SelfManaged} +) + type OwningProduct string var ( @@ -22,443 +32,567 @@ var ( kubernetes = OwningProduct("Kubernetes") ) -var ( - FeatureGateValidatingAdmissionPolicy = FeatureGateName("ValidatingAdmissionPolicy") - validatingAdmissionPolicy = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateValidatingAdmissionPolicy, - }, - OwningJiraComponent: "kube-apiserver", - ResponsiblePerson: "benluddy", - OwningProduct: kubernetes, - } - - FeatureGateGatewayAPI = FeatureGateName("GatewayAPI") - gateGatewayAPI = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateGatewayAPI, - }, - OwningJiraComponent: "Routing", - ResponsiblePerson: "miciah", - OwningProduct: ocpSpecific, - } - - FeatureGateOpenShiftPodSecurityAdmission = FeatureGateName("OpenShiftPodSecurityAdmission") - openShiftPodSecurityAdmission = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateOpenShiftPodSecurityAdmission, - }, - OwningJiraComponent: "auth", - ResponsiblePerson: "stlaz", - OwningProduct: ocpSpecific, - } - - FeatureGateExternalCloudProvider = FeatureGateName("ExternalCloudProvider") - externalCloudProvider = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateExternalCloudProvider, - }, - OwningJiraComponent: "cloud-provider", - 
ResponsiblePerson: "jspeed", - OwningProduct: ocpSpecific, - } - - FeatureGateExternalCloudProviderAzure = FeatureGateName("ExternalCloudProviderAzure") - externalCloudProviderAzure = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateExternalCloudProviderAzure, - }, - OwningJiraComponent: "cloud-provider", - ResponsiblePerson: "jspeed", - OwningProduct: ocpSpecific, - } - - FeatureGateExternalCloudProviderGCP = FeatureGateName("ExternalCloudProviderGCP") - externalCloudProviderGCP = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateExternalCloudProviderGCP, - }, - OwningJiraComponent: "cloud-provider", - ResponsiblePerson: "jspeed", - OwningProduct: ocpSpecific, - } - - FeatureGateExternalCloudProviderExternal = FeatureGateName("ExternalCloudProviderExternal") - externalCloudProviderExternal = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateExternalCloudProviderExternal, - }, - OwningJiraComponent: "cloud-provider", - ResponsiblePerson: "elmiko", - OwningProduct: ocpSpecific, - } - - FeatureGateCSIDriverSharedResource = FeatureGateName("CSIDriverSharedResource") - csiDriverSharedResource = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateCSIDriverSharedResource, - }, - OwningJiraComponent: "builds", - ResponsiblePerson: "adkaplan", - OwningProduct: ocpSpecific, - } +type featureGateBuilder struct { + name string + owningJiraComponent string + responsiblePerson string + owningProduct OwningProduct - FeatureGateBuildCSIVolumes = FeatureGateName("BuildCSIVolumes") - buildCSIVolumes = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateBuildCSIVolumes, - }, - OwningJiraComponent: "builds", - ResponsiblePerson: "adkaplan", - OwningProduct: ocpSpecific, - } - - FeatureGateNodeSwap = FeatureGateName("NodeSwap") - nodeSwap = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateNodeSwap, - }, - OwningJiraComponent: "node", - ResponsiblePerson: "ehashman", - OwningProduct: kubernetes, - } - - FeatureGateMachineAPIProviderOpenStack = FeatureGateName("MachineAPIProviderOpenStack") - machineAPIProviderOpenStack = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateMachineAPIProviderOpenStack, - }, - OwningJiraComponent: "openstack", - ResponsiblePerson: "egarcia", - OwningProduct: ocpSpecific, - } - - FeatureGateInsightsConfigAPI = FeatureGateName("InsightsConfigAPI") - insightsConfigAPI = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateInsightsConfigAPI, - }, - OwningJiraComponent: "insights", - ResponsiblePerson: "tremes", - OwningProduct: ocpSpecific, - } - - FeatureGateDynamicResourceAllocation = FeatureGateName("DynamicResourceAllocation") - dynamicResourceAllocation = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateDynamicResourceAllocation, - }, - OwningJiraComponent: "scheduling", - ResponsiblePerson: "jchaloup", - OwningProduct: kubernetes, - } - - FeatureGateAzureWorkloadIdentity = FeatureGateName("AzureWorkloadIdentity") - azureWorkloadIdentity = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateAzureWorkloadIdentity, - }, - OwningJiraComponent: "cloud-credential-operator", - ResponsiblePerson: "abutcher", - OwningProduct: ocpSpecific, - } - - FeatureGateMaxUnavailableStatefulSet = 
FeatureGateName("MaxUnavailableStatefulSet") - maxUnavailableStatefulSet = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateMaxUnavailableStatefulSet, - }, - OwningJiraComponent: "apps", - ResponsiblePerson: "atiratree", - OwningProduct: kubernetes, - } - - FeatureGateEventedPLEG = FeatureGateName("EventedPLEG") - eventedPleg = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateEventedPLEG, - }, - OwningJiraComponent: "node", - ResponsiblePerson: "sairameshv", - OwningProduct: kubernetes, - } - - FeatureGatePrivateHostedZoneAWS = FeatureGateName("PrivateHostedZoneAWS") - privateHostedZoneAWS = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGatePrivateHostedZoneAWS, - }, - OwningJiraComponent: "Routing", - ResponsiblePerson: "miciah", - OwningProduct: ocpSpecific, - } - - FeatureGateSigstoreImageVerification = FeatureGateName("SigstoreImageVerification") - sigstoreImageVerification = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateSigstoreImageVerification, - }, - OwningJiraComponent: "node", - ResponsiblePerson: "sgrunert", - OwningProduct: ocpSpecific, - } - - FeatureGateGCPLabelsTags = FeatureGateName("GCPLabelsTags") - gcpLabelsTags = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateGCPLabelsTags, - }, - OwningJiraComponent: "Installer", - ResponsiblePerson: "bhb", - OwningProduct: ocpSpecific, - } - - FeatureGateAlibabaPlatform = FeatureGateName("AlibabaPlatform") - alibabaPlatform = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateAlibabaPlatform, - }, - OwningJiraComponent: "cloud-provider", - ResponsiblePerson: "jspeed", - OwningProduct: ocpSpecific, - } + statusByClusterProfileByFeatureSet map[ClusterProfileName]map[FeatureSet]bool +} - FeatureGateCloudDualStackNodeIPs = FeatureGateName("CloudDualStackNodeIPs") - cloudDualStackNodeIPs = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateCloudDualStackNodeIPs, - }, - OwningJiraComponent: "machine-config-operator/platform-baremetal", - ResponsiblePerson: "mkowalsk", - OwningProduct: kubernetes, - } - FeatureGateVSphereStaticIPs = FeatureGateName("VSphereStaticIPs") - vSphereStaticIPs = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateVSphereStaticIPs, - }, - OwningJiraComponent: "splat", - ResponsiblePerson: "rvanderp3", - OwningProduct: ocpSpecific, +// newFeatureGate featuregate are disabled in every FeatureSet and selectively enabled +func newFeatureGate(name string) *featureGateBuilder { + b := &featureGateBuilder{ + name: name, + statusByClusterProfileByFeatureSet: map[ClusterProfileName]map[FeatureSet]bool{}, } - - FeatureGateRouteExternalCertificate = FeatureGateName("RouteExternalCertificate") - routeExternalCertificate = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateRouteExternalCertificate, - }, - OwningJiraComponent: "router", - ResponsiblePerson: "thejasn", - OwningProduct: ocpSpecific, + for _, clusterProfile := range AllClusterProfiles { + byFeatureSet := map[FeatureSet]bool{} + for _, featureSet := range AllFixedFeatureSets { + byFeatureSet[featureSet] = false + } + b.statusByClusterProfileByFeatureSet[clusterProfile] = byFeatureSet } + return b +} - FeatureGateAdminNetworkPolicy = FeatureGateName("AdminNetworkPolicy") - adminNetworkPolicy = 
FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateAdminNetworkPolicy, - }, - OwningJiraComponent: "Networking/ovn-kubernetes", - ResponsiblePerson: "tssurya", - OwningProduct: ocpSpecific, - } +func (b *featureGateBuilder) reportProblemsToJiraComponent(owningJiraComponent string) *featureGateBuilder { + b.owningJiraComponent = owningJiraComponent + return b +} - FeatureGateNetworkLiveMigration = FeatureGateName("NetworkLiveMigration") - sdnLiveMigration = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateNetworkLiveMigration, - }, - OwningJiraComponent: "Networking/ovn-kubernetes", - ResponsiblePerson: "pliu", - OwningProduct: ocpSpecific, - } +func (b *featureGateBuilder) contactPerson(responsiblePerson string) *featureGateBuilder { + b.responsiblePerson = responsiblePerson + return b +} - FeatureGateAutomatedEtcdBackup = FeatureGateName("AutomatedEtcdBackup") - automatedEtcdBackup = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateAutomatedEtcdBackup, - }, - OwningJiraComponent: "etcd", - ResponsiblePerson: "hasbro17", - OwningProduct: ocpSpecific, - } +func (b *featureGateBuilder) productScope(owningProduct OwningProduct) *featureGateBuilder { + b.owningProduct = owningProduct + return b +} - FeatureGateMachineAPIOperatorDisableMachineHealthCheckController = FeatureGateName("MachineAPIOperatorDisableMachineHealthCheckController") - machineAPIOperatorDisableMachineHealthCheckController = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateMachineAPIOperatorDisableMachineHealthCheckController, - }, - OwningJiraComponent: "ecoproject", - ResponsiblePerson: "msluiter", - OwningProduct: ocpSpecific, +func (b *featureGateBuilder) enableIn(featureSets ...FeatureSet) *featureGateBuilder { + for clusterProfile := range b.statusByClusterProfileByFeatureSet { + for _, featureSet := range featureSets { + b.statusByClusterProfileByFeatureSet[clusterProfile][featureSet] = true + } } + return b +} - FeatureGateDNSNameResolver = FeatureGateName("DNSNameResolver") - dnsNameResolver = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateDNSNameResolver, - }, - OwningJiraComponent: "dns", - ResponsiblePerson: "miciah", - OwningProduct: ocpSpecific, +func (b *featureGateBuilder) enableForClusterProfile(clusterProfile ClusterProfileName, featureSets ...FeatureSet) *featureGateBuilder { + for _, featureSet := range featureSets { + b.statusByClusterProfileByFeatureSet[clusterProfile][featureSet] = true } + return b +} - FeatureGateVSphereControlPlaneMachineset = FeatureGateName("VSphereControlPlaneMachineSet") - vSphereControlPlaneMachineset = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateVSphereControlPlaneMachineset, - }, - OwningJiraComponent: "splat", - ResponsiblePerson: "rvanderp3", - OwningProduct: ocpSpecific, +func (b *featureGateBuilder) register() (FeatureGateName, error) { + if len(b.name) == 0 { + return "", fmt.Errorf("missing name") } - - FeatureGateMachineConfigNodes = FeatureGateName("MachineConfigNodes") - machineConfigNodes = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateMachineConfigNodes, - }, - OwningJiraComponent: "MachineConfigOperator", - ResponsiblePerson: "cdoern", - OwningProduct: ocpSpecific, + if len(b.owningJiraComponent) == 0 { + return "", fmt.Errorf("missing owningJiraComponent") } - - 
FeatureGateClusterAPIInstall = FeatureGateName("ClusterAPIInstall") - clusterAPIInstall = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateClusterAPIInstall, - }, - OwningJiraComponent: "Installer", - ResponsiblePerson: "vincepri", - OwningProduct: ocpSpecific, + if len(b.responsiblePerson) == 0 { + return "", fmt.Errorf("missing responsiblePerson") } - - FeatureGateMetricsServer = FeatureGateName("MetricsServer") - metricsServer = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateMetricsServer, - }, - OwningJiraComponent: "Monitoring", - ResponsiblePerson: "slashpai", - OwningProduct: ocpSpecific, + if len(b.owningProduct) == 0 { + return "", fmt.Errorf("missing owningProduct") } - FeatureGateInstallAlternateInfrastructureAWS = FeatureGateName("InstallAlternateInfrastructureAWS") - installAlternateInfrastructureAWS = FeatureGateDescription{ + featureGateName := FeatureGateName(b.name) + description := FeatureGateDescription{ FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateInstallAlternateInfrastructureAWS, + Name: featureGateName, }, - OwningJiraComponent: "Installer", - ResponsiblePerson: "padillon", - OwningProduct: ocpSpecific, + OwningJiraComponent: b.owningJiraComponent, + ResponsiblePerson: b.responsiblePerson, + OwningProduct: b.owningProduct, } - FeatureGateGCPClusterHostedDNS = FeatureGateName("GCPClusterHostedDNS") - gcpClusterHostedDNS = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateGCPClusterHostedDNS, - }, - OwningJiraComponent: "Installer", - ResponsiblePerson: "barbacbd", - OwningProduct: ocpSpecific, - } + // statusByClusterProfileByFeatureSet is initialized by constructor to be false for every combination + for clusterProfile, byFeatureSet := range b.statusByClusterProfileByFeatureSet { + for featureSet, enabled := range byFeatureSet { + if _, ok := allFeatureGates[clusterProfile]; !ok { + allFeatureGates[clusterProfile] = map[FeatureSet]*FeatureGateEnabledDisabled{} + } + if _, ok := allFeatureGates[clusterProfile][featureSet]; !ok { + allFeatureGates[clusterProfile][featureSet] = &FeatureGateEnabledDisabled{} + } - FeatureGateMixedCPUsAllocation = FeatureGateName("MixedCPUsAllocation") - mixedCPUsAllocation = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateMixedCPUsAllocation, - }, - OwningJiraComponent: "NodeTuningOperator", - ResponsiblePerson: "titzhak", - OwningProduct: ocpSpecific, + if enabled { + allFeatureGates[clusterProfile][featureSet].Enabled = append(allFeatureGates[clusterProfile][featureSet].Enabled, description) + } else { + allFeatureGates[clusterProfile][featureSet].Disabled = append(allFeatureGates[clusterProfile][featureSet].Disabled, description) + } + } } - FeatureGateManagedBootImages = FeatureGateName("ManagedBootImages") - managedBootImages = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateManagedBootImages, - }, - OwningJiraComponent: "MachineConfigOperator", - ResponsiblePerson: "djoshy", - OwningProduct: ocpSpecific, - } + return featureGateName, nil +} - FeatureGateDisableKubeletCloudCredentialProviders = FeatureGateName("DisableKubeletCloudCredentialProviders") - disableKubeletCloudCredentialProviders = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateDisableKubeletCloudCredentialProviders, - }, - OwningJiraComponent: "cloud-provider", - ResponsiblePerson: "jspeed", - 
OwningProduct: kubernetes, +func (b *featureGateBuilder) mustRegister() FeatureGateName { + ret, err := b.register() + if err != nil { + panic(err) } + return ret +} - FeatureGateOnClusterBuild = FeatureGateName("OnClusterBuild") - onClusterBuild = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateOnClusterBuild, - }, - OwningJiraComponent: "MachineConfigOperator", - ResponsiblePerson: "dkhater", - OwningProduct: ocpSpecific, +func FeatureSets(clusterProfile ClusterProfileName, featureSet FeatureSet) (*FeatureGateEnabledDisabled, error) { + byFeatureSet, ok := allFeatureGates[clusterProfile] + if !ok { + return nil, fmt.Errorf("no information found for ClusterProfile=%q", clusterProfile) } - - FeatureGateSignatureStores = FeatureGateName("SignatureStores") - signatureStores = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateSignatureStores, - }, - OwningJiraComponent: "Cluster Version Operator", - ResponsiblePerson: "lmohanty", - OwningProduct: ocpSpecific, + featureGates, ok := byFeatureSet[featureSet] + if !ok { + return nil, fmt.Errorf("no information found for FeatureSet=%q under ClusterProfile=%q", featureSet, clusterProfile) } + return featureGates.DeepCopy(), nil +} - FeatureGateKMSv1 = FeatureGateName("KMSv1") - kmsv1 = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateKMSv1, - }, - OwningJiraComponent: "kube-apiserver", - ResponsiblePerson: "dgrisonnet", - OwningProduct: kubernetes, - } +func AllFeatureSets() map[ClusterProfileName]map[FeatureSet]*FeatureGateEnabledDisabled { + ret := map[ClusterProfileName]map[FeatureSet]*FeatureGateEnabledDisabled{} - FeatureGatePinnedImages = FeatureGateName("PinnedImages") - pinnedImages = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGatePinnedImages, - }, - OwningJiraComponent: "MachineConfigOperator", - ResponsiblePerson: "jhernand", - OwningProduct: ocpSpecific, - } + for clusterProfile, byFeatureSet := range allFeatureGates { + newByFeatureSet := map[FeatureSet]*FeatureGateEnabledDisabled{} - FeatureGateUpgradeStatus = FeatureGateName("UpgradeStatus") - upgradeStatus = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateUpgradeStatus, - }, - OwningJiraComponent: "Cluster Version Operator", - ResponsiblePerson: "pmuller", - OwningProduct: ocpSpecific, + for featureSet, enabledDisabled := range byFeatureSet { + newByFeatureSet[featureSet] = enabledDisabled.DeepCopy() + } + ret[clusterProfile] = newByFeatureSet } - FeatureGateTranslateStreamCloseWebsocketRequests = FeatureGateName("TranslateStreamCloseWebsocketRequests") - translateStreamCloseWebsocketRequests = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateTranslateStreamCloseWebsocketRequests, - }, - OwningJiraComponent: "kube-apiserver", - ResponsiblePerson: "akashem", - OwningProduct: kubernetes, - } + return ret +} - FeatureGateVolumeGroupSnapshot = FeatureGateName("VolumeGroupSnapshot") - volumeGroupSnapshot = FeatureGateDescription{ - FeatureGateAttributes: FeatureGateAttributes{ - Name: FeatureGateVolumeGroupSnapshot, - }, - OwningJiraComponent: "Storage / Kubernetes External Components", - ResponsiblePerson: "fbertina", - OwningProduct: kubernetes, - } +var ( + allFeatureGates = map[ClusterProfileName]map[FeatureSet]*FeatureGateEnabledDisabled{} + + FeatureGateServiceAccountTokenNodeBindingValidation = 
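Taken together, the builder above replaces the hand-written FeatureGateDescription literals. A minimal sketch of declaring a gate with the new fluent API (the gate name and Jira component below are hypothetical, not part of this patch):

// Gates start disabled in every FeatureSet for every cluster profile;
// enableIn/enableForClusterProfile flip them on selectively.
var FeatureGateMyExample = newFeatureGate("MyExample"). // hypothetical name
	reportProblemsToJiraComponent("example-component"). // hypothetical component
	contactPerson("someone").
	productScope(ocpSpecific).
	enableIn(TechPreviewNoUpgrade).               // on under TechPreview for all profiles
	enableForClusterProfile(Hypershift, Default). // additionally on by default on HyperShift
	mustRegister()                                // panics if name, component, person, or product is missing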
newFeatureGate("ServiceAccountTokenNodeBindingValidation"). + reportProblemsToJiraComponent("apiserver-auth"). + contactPerson("stlaz"). + productScope(kubernetes). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateServiceAccountTokenNodeBinding = newFeatureGate("ServiceAccountTokenNodeBinding"). + reportProblemsToJiraComponent("apiserver-auth"). + contactPerson("stlaz"). + productScope(kubernetes). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateServiceAccountTokenPodNodeInfo = newFeatureGate("ServiceAccountTokenPodNodeInfo"). + reportProblemsToJiraComponent("apiserver-auth"). + contactPerson("stlaz"). + productScope(kubernetes). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateValidatingAdmissionPolicy = newFeatureGate("ValidatingAdmissionPolicy"). + reportProblemsToJiraComponent("kube-apiserver"). + contactPerson("benluddy"). + productScope(kubernetes). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateGatewayAPI = newFeatureGate("GatewayAPI"). + reportProblemsToJiraComponent("Routing"). + contactPerson("miciah"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateOpenShiftPodSecurityAdmission = newFeatureGate("OpenShiftPodSecurityAdmission"). + reportProblemsToJiraComponent("auth"). + contactPerson("stlaz"). + productScope(ocpSpecific). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateExternalCloudProvider = newFeatureGate("ExternalCloudProvider"). + reportProblemsToJiraComponent("cloud-provider"). + contactPerson("jspeed"). + productScope(ocpSpecific). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateExternalCloudProviderAzure = newFeatureGate("ExternalCloudProviderAzure"). + reportProblemsToJiraComponent("cloud-provider"). + contactPerson("jspeed"). + productScope(ocpSpecific). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateExternalCloudProviderGCP = newFeatureGate("ExternalCloudProviderGCP"). + reportProblemsToJiraComponent("cloud-provider"). + contactPerson("jspeed"). + productScope(ocpSpecific). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateExternalCloudProviderExternal = newFeatureGate("ExternalCloudProviderExternal"). + reportProblemsToJiraComponent("cloud-provider"). + contactPerson("elmiko"). + productScope(ocpSpecific). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateCSIDriverSharedResource = newFeatureGate("CSIDriverSharedResource"). + reportProblemsToJiraComponent("builds"). + contactPerson("adkaplan"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateBuildCSIVolumes = newFeatureGate("BuildCSIVolumes"). + reportProblemsToJiraComponent("builds"). + contactPerson("adkaplan"). + productScope(ocpSpecific). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateNodeSwap = newFeatureGate("NodeSwap"). + reportProblemsToJiraComponent("node"). + contactPerson("ehashman"). + productScope(kubernetes). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMachineAPIProviderOpenStack = newFeatureGate("MachineAPIProviderOpenStack"). + reportProblemsToJiraComponent("openstack"). + contactPerson("egarcia"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateInsightsConfigAPI = newFeatureGate("InsightsConfigAPI"). + reportProblemsToJiraComponent("insights"). + contactPerson("tremes"). + productScope(ocpSpecific). 
+ enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateDynamicResourceAllocation = newFeatureGate("DynamicResourceAllocation"). + reportProblemsToJiraComponent("scheduling"). + contactPerson("jchaloup"). + productScope(kubernetes). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateAzureWorkloadIdentity = newFeatureGate("AzureWorkloadIdentity"). + reportProblemsToJiraComponent("cloud-credential-operator"). + contactPerson("abutcher"). + productScope(ocpSpecific). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMaxUnavailableStatefulSet = newFeatureGate("MaxUnavailableStatefulSet"). + reportProblemsToJiraComponent("apps"). + contactPerson("atiratree"). + productScope(kubernetes). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateEventedPLEG = newFeatureGate("EventedPLEG"). + reportProblemsToJiraComponent("node"). + contactPerson("sairameshv"). + productScope(kubernetes). + mustRegister() + + FeatureGatePrivateHostedZoneAWS = newFeatureGate("PrivateHostedZoneAWS"). + reportProblemsToJiraComponent("Routing"). + contactPerson("miciah"). + productScope(ocpSpecific). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateSigstoreImageVerification = newFeatureGate("SigstoreImageVerification"). + reportProblemsToJiraComponent("node"). + contactPerson("sgrunert"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateGCPLabelsTags = newFeatureGate("GCPLabelsTags"). + reportProblemsToJiraComponent("Installer"). + contactPerson("bhb"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateAlibabaPlatform = newFeatureGate("AlibabaPlatform"). + reportProblemsToJiraComponent("cloud-provider"). + contactPerson("jspeed"). + productScope(ocpSpecific). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateCloudDualStackNodeIPs = newFeatureGate("CloudDualStackNodeIPs"). + reportProblemsToJiraComponent("machine-config-operator/platform-baremetal"). + contactPerson("mkowalsk"). + productScope(kubernetes). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateVSphereStaticIPs = newFeatureGate("VSphereStaticIPs"). + reportProblemsToJiraComponent("splat"). + contactPerson("rvanderp3"). + productScope(ocpSpecific). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateRouteExternalCertificate = newFeatureGate("RouteExternalCertificate"). + reportProblemsToJiraComponent("router"). + contactPerson("thejasn"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateAdminNetworkPolicy = newFeatureGate("AdminNetworkPolicy"). + reportProblemsToJiraComponent("Networking/ovn-kubernetes"). + contactPerson("tssurya"). + productScope(ocpSpecific). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateNetworkLiveMigration = newFeatureGate("NetworkLiveMigration"). + reportProblemsToJiraComponent("Networking/ovn-kubernetes"). + contactPerson("pliu"). + productScope(ocpSpecific). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateNetworkDiagnosticsConfig = newFeatureGate("NetworkDiagnosticsConfig"). + reportProblemsToJiraComponent("Networking/cluster-network-operator"). + contactPerson("kyrtapz"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateHardwareSpeed = newFeatureGate("HardwareSpeed"). + reportProblemsToJiraComponent("etcd"). + contactPerson("hasbro17"). 
+ productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateAutomatedEtcdBackup = newFeatureGate("AutomatedEtcdBackup"). + reportProblemsToJiraComponent("etcd"). + contactPerson("hasbro17"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMachineAPIOperatorDisableMachineHealthCheckController = newFeatureGate("MachineAPIOperatorDisableMachineHealthCheckController"). + reportProblemsToJiraComponent("ecoproject"). + contactPerson("msluiter"). + productScope(ocpSpecific). + mustRegister() + + FeatureGateDNSNameResolver = newFeatureGate("DNSNameResolver"). + reportProblemsToJiraComponent("dns"). + contactPerson("miciah"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateVSphereControlPlaneMachineset = newFeatureGate("VSphereControlPlaneMachineSet"). + reportProblemsToJiraComponent("splat"). + contactPerson("rvanderp3"). + productScope(ocpSpecific). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMachineConfigNodes = newFeatureGate("MachineConfigNodes"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("cdoern"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateClusterAPIInstall = newFeatureGate("ClusterAPIInstall"). + reportProblemsToJiraComponent("Installer"). + contactPerson("vincepri"). + productScope(ocpSpecific). + mustRegister() + + FeatureGateMetricsServer = newFeatureGate("MetricsServer"). + reportProblemsToJiraComponent("Monitoring"). + contactPerson("slashpai"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateInstallAlternateInfrastructureAWS = newFeatureGate("InstallAlternateInfrastructureAWS"). + reportProblemsToJiraComponent("Installer"). + contactPerson("padillon"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateGCPClusterHostedDNS = newFeatureGate("GCPClusterHostedDNS"). + reportProblemsToJiraComponent("Installer"). + contactPerson("barbacbd"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMixedCPUsAllocation = newFeatureGate("MixedCPUsAllocation"). + reportProblemsToJiraComponent("NodeTuningOperator"). + contactPerson("titzhak"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateManagedBootImages = newFeatureGate("ManagedBootImages"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("djoshy"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateDisableKubeletCloudCredentialProviders = newFeatureGate("DisableKubeletCloudCredentialProviders"). + reportProblemsToJiraComponent("cloud-provider"). + contactPerson("jspeed"). + productScope(kubernetes). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateOnClusterBuild = newFeatureGate("OnClusterBuild"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("dkhater"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateSignatureStores = newFeatureGate("SignatureStores"). + reportProblemsToJiraComponent("Cluster Version Operator"). + contactPerson("lmohanty"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateKMSv1 = newFeatureGate("KMSv1"). + reportProblemsToJiraComponent("kube-apiserver"). + contactPerson("dgrisonnet"). 
+ productScope(kubernetes). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGatePinnedImages = newFeatureGate("PinnedImages"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("jhernand"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateUpgradeStatus = newFeatureGate("UpgradeStatus"). + reportProblemsToJiraComponent("Cluster Version Operator"). + contactPerson("pmuller"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateTranslateStreamCloseWebsocketRequests = newFeatureGate("TranslateStreamCloseWebsocketRequests"). + reportProblemsToJiraComponent("kube-apiserver"). + contactPerson("akashem"). + productScope(kubernetes). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateVolumeGroupSnapshot = newFeatureGate("VolumeGroupSnapshot"). + reportProblemsToJiraComponent("Storage / Kubernetes External Components"). + contactPerson("fbertina"). + productScope(kubernetes). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateExternalOIDC = newFeatureGate("ExternalOIDC"). + reportProblemsToJiraComponent("authentication"). + contactPerson("stlaz"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + enableForClusterProfile(Hypershift, Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateExample = newFeatureGate("Example"). + reportProblemsToJiraComponent("cluster-config"). + contactPerson("deads"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGatePlatformOperators = newFeatureGate("PlatformOperators"). + reportProblemsToJiraComponent("olm"). + contactPerson("joe"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateNewOLM = newFeatureGate("NewOLM"). + reportProblemsToJiraComponent("olm"). + contactPerson("joe"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateExternalRouteCertificate = newFeatureGate("ExternalRouteCertificate"). + reportProblemsToJiraComponent("network-edge"). + contactPerson("miciah"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateInsightsOnDemandDataGather = newFeatureGate("InsightsOnDemandDataGather"). + reportProblemsToJiraComponent("insights"). + contactPerson("tremes"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateAlertingRules = newFeatureGate("AlertingRules"). + reportProblemsToJiraComponent("Monitoring"). + contactPerson("simon"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateBareMetalLoadBalancer = newFeatureGate("BareMetalLoadBalancer"). + reportProblemsToJiraComponent("metal"). + contactPerson("EmilienM"). + productScope(ocpSpecific). + enableIn(Default, TechPreviewNoUpgrade). + mustRegister() + + FeatureGateInsightsConfig = newFeatureGate("InsightsConfig"). + reportProblemsToJiraComponent("insights"). + contactPerson("tremes"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateImagePolicy = newFeatureGate("ImagePolicy"). + reportProblemsToJiraComponent("node"). + contactPerson("rphillips"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateNodeDisruptionPolicy = newFeatureGate("NodeDisruptionPolicy"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("jerzhang"). + productScope(ocpSpecific). 
+ enableIn(TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMetricsCollectionProfiles = newFeatureGate("MetricsCollectionProfiles"). + reportProblemsToJiraComponent("Monitoring"). + contactPerson("rexagod"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() ) diff --git a/vendor/github.com/openshift/api/config/v1/stable.apiserver.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.apiserver.testsuite.yaml deleted file mode 100644 index 75f846a3d..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.apiserver.testsuite.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] APIServer" -crd: 0000_10_config-operator_01_apiserver-Default.crd.yaml -tests: - onCreate: - - name: Should be able to create encrypt with aescbc - initial: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - encryption: - type: aescbc - expected: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - audit: - profile: Default - encryption: - type: aescbc - - name: Should be able to create encrypt with aesgcm - initial: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - encryption: - type: aesgcm - expected: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - audit: - profile: Default - encryption: - type: aesgcm - diff --git a/vendor/github.com/openshift/api/config/v1/stable.authentication.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.authentication.testsuite.yaml deleted file mode 100644 index 6e966c15b..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.authentication.testsuite.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] Authentication" -crd: 0000_10_config-operator_01_authentication.crd-Default.yaml -tests: - onCreate: - - name: Should be able to create a minimal Authentication - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: {} # No spec is required for a Authentication - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: {} - - name: Shouldn't be able to use the OIDC type in a stable version of the resource - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - expectedError: "spec.type: Unsupported value: \"OIDC\": supported values: \"\", \"None\", \"IntegratedOAuth\"" \ No newline at end of file diff --git a/vendor/github.com/openshift/api/config/v1/stable.build.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.build.testsuite.yaml deleted file mode 100644 index b422ebd20..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.build.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] Build" -crd: 0000_10_openshift-controller-manager-operator_01_build.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Build - initial: | - apiVersion: config.openshift.io/v1 - kind: Build - spec: {} # No spec is required for a Build - expected: | - apiVersion: config.openshift.io/v1 - kind: Build - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.clusteroperator.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.clusteroperator.testsuite.yaml deleted file mode 100644 index 
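With every gate registered through the builder, consumers resolve the computed enablement per cluster profile and feature set via the new FeatureSets helper. A rough in-package sketch (listEnabled is illustrative, not part of the patch):

// listEnabled prints the gates computed as enabled for a profile/featureSet pair.
func listEnabled(profile ClusterProfileName, fs FeatureSet) error {
	enabledDisabled, err := FeatureSets(profile, fs) // returns a deep copy
	if err != nil {
		return err
	}
	for _, fg := range enabledDisabled.Enabled {
		fmt.Printf("enabled: %s\n", fg.FeatureGateAttributes.Name)
	}
	return nil
}

// e.g. listEnabled(SelfManaged, TechPreviewNoUpgrade)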
177e8f691..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.clusteroperator.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] ClusterOperator" -crd: 0000_00_cluster-version-operator_01_clusteroperator.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ClusterOperator - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterOperator - spec: {} # No spec is required for a ClusterOperator - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterOperator - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.clusterversion.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.clusterversion.testsuite.yaml deleted file mode 100644 index 4c3fed149..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.clusterversion.testsuite.yaml +++ /dev/null @@ -1,418 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] ClusterVersion" -crd: 0000_00_cluster-version-operator_01_clusterversion-Default.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ClusterVersion - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - - name: Should allow image to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - image: bar - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - image: bar - - name: Should allow version to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - version: 4.11.1 - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - version: 4.11.1 - - name: Should allow architecture to be empty - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: "" - version: 4.11.1 - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: "" - version: 4.11.1 - - name: Should allow architecture and version to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - - name: Version must be set if architecture is set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - expectedError: "Version must be set if Architecture is set" - - name: Should not allow image and architecture to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - image: bar - expectedError: "cannot set both Architecture and Image" - - name: Should be able to create a ClusterVersion with base capability None, and additional capabilities baremetal and MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - 
capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - - name: Should not be able to create a ClusterVersion with base capability None, and additional capabilities baremetal without MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - expectedError: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability - - name: Should be able to create a ClusterVersion with base capability None, and additional capabilities marketplace and OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - - name: Should not be able to create a ClusterVersion with base capability None, and additional capabilities marketplace without OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - expectedError: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability - onUpdate: - - name: Should not allow image to be set if architecture set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - image: bar - expectedError: "cannot set both Architecture and Image" - - name: Should not allow architecture to be set if image set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - image: bar - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - image: bar - expectedError: "cannot set both Architecture and Image" - - name: Should be able to add the baremetal capability with a ClusterVersion with base capability None, and implicitly enabled MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - MachineAPI - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - status: - 
desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - MachineAPI - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - MachineAPI - - name: Should be able to add the baremetal capability with a ClusterVersion with base capability None, with the Machine API capability - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - - name: Should not be able to add the baremetal capability with a ClusterVersion with base capability None, and without MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - expectedError: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability - - name: Should be able to add the marketplace capability with a ClusterVersion with base capability None, and implicitly enabled OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - OperatorLifecycleManager - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - OperatorLifecycleManager - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - OperatorLifecycleManager - - name: Should be able to add the marketplace capability with a ClusterVersion with base capability None, with the OperatorLifecycleManager capability - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - updated: | - 
apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - - name: Should not be able to add the marketplace capability with a ClusterVersion with base capability None, and without OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - expectedError: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability diff --git a/vendor/github.com/openshift/api/config/v1/stable.console.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.console.testsuite.yaml deleted file mode 100644 index 0081816fc..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.console.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] Console" -crd: 0000_10_config-operator_01_console.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Console - initial: | - apiVersion: config.openshift.io/v1 - kind: Console - spec: {} # No spec is required for a Console - expected: | - apiVersion: config.openshift.io/v1 - kind: Console - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.dns.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.dns.testsuite.yaml deleted file mode 100644 index 3054d200e..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.dns.testsuite.yaml +++ /dev/null @@ -1,105 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] DNS" -crd: 0000_10_config-operator_01_dns-Default.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal DNS - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: {} # No spec is required for a DNS - expected: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: {} - - name: Should be able to specify an AWS role ARN for a private hosted zone - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: AWS - aws: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - expected: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: AWS - aws: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - - name: Should not be able to specify unsupported platform - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: Azure - azure: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - expectedError: "Invalid value: \"string\": allowed values are '' and 'AWS'" - - name: Should not be able to specify invalid AWS role ARN - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - metadata: - name: cluster - spec: - platform: - type: AWS - aws: - privateZoneIAMRole: 
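The ClusterVersion suite above pins down two capability dependencies that the CRD's validation enforces. Summarized as data (an illustrative Go map; upstream enforces these with CEL rules on ClusterVersion.spec.capabilities, not with a map):

// Capability dependencies exercised by the deleted tests.
var capabilityRequires = map[string]string{
	"baremetal":   "MachineAPI",
	"marketplace": "OperatorLifecycleManager",
}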
arn:aws:iam:bad:123456789012:role/foo - expectedError: "DNS.config.openshift.io \"cluster\" is invalid: spec.platform.aws.privateZoneIAMRole: Invalid value: \"arn:aws:iam:bad:123456789012:role/foo\": spec.platform.aws.privateZoneIAMRole in body should match '^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\\/.*$'" - - name: Should not be able to specify different type and platform - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: "" - aws: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - expectedError: "Invalid value: \"object\": aws configuration is required when platform is AWS, and forbidden otherwise" - onUpdate: - - name: Can switch from empty (default), to AWS - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: "" - updated: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: AWS - aws: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - expected: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: AWS - aws: - privateZoneIAMRole: arn:aws:iam::123456789012:role/foo - - name: Upgrade case is valid - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: {} # No spec is required for a DNS - updated: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: "" - expected: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: - platform: - type: "" - diff --git a/vendor/github.com/openshift/api/config/v1/stable.featuregate.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.featuregate.testsuite.yaml deleted file mode 100644 index 6b6a4327a..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.featuregate.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] FeatureGate" -crd: 0000_10_config-operator_01_featuregate.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal FeatureGate - initial: | - apiVersion: config.openshift.io/v1 - kind: FeatureGate - spec: {} # No spec is required for a FeatureGate - expected: | - apiVersion: config.openshift.io/v1 - kind: FeatureGate - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.hypershift.authentication.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.hypershift.authentication.testsuite.yaml deleted file mode 100644 index 406bf3861..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.hypershift.authentication.testsuite.yaml +++ /dev/null @@ -1,298 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable][Hypershift] Authentication" -crd: 0000_10_config-operator_01_authentication.crd-Default-Hypershift.yaml -tests: - onCreate: - - name: Should be able to create a minimal Authentication - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: {} # No spec is required for a Authentication - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: {} - - name: Should be able to use the OIDC type - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - - name: Cannot set username claim prefix with policy NoPrefix - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - 
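The DNS tests above assert a specific ARN pattern for spec.platform.aws.privateZoneIAMRole. The same check, reproduced in plain Go from the regex quoted in the expected error (assuming the standard regexp package is imported):

var privateZoneIAMRoleRE = regexp.MustCompile(`^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role/.*$`)

// isValidPrivateZoneIAMRole reports whether an ARN satisfies the CRD pattern.
func isValidPrivateZoneIAMRole(arn string) bool {
	return privateZoneIAMRoleRE.MatchString(arn)
}

// isValidPrivateZoneIAMRole("arn:aws:iam::123456789012:role/foo")    -> true
// isValidPrivateZoneIAMRole("arn:aws:iam:bad:123456789012:role/foo") -> false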
- name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: NoPrefix - prefix: - prefixString: "myoidc:" - expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" - - name: Can set username claim prefix with policy Prefix - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: Prefix - prefix: - prefixString: "myoidc:" - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: Prefix - prefix: - prefixString: "myoidc:" - - name: Cannot leave username claim prefix blank with policy Prefix - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: Prefix - expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" - - name: Can set OIDC providers with no username prefixing - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: NoPrefix - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: NoPrefix - onUpdate: - - name: Updating OIDC provider with a client that's not in the status - initial: &initConfig | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: someclient - - componentNamespace: namespace - componentName: name - clientID: legitclient - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - updated: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: someclient - - componentNamespace: namespace - componentName: name - clientID: legitclient - - componentNamespace: dif-namespace # new client here - componentName: tehName - clientID: cool-client - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expectedError: "all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously 
configured oidcClient or they must exist in the status.oidcClients" - - name: Updating OIDC provider with a client that's different from the previous one - initial: *initConfig - updated: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: dif-namespace - componentName: tehName - clientID: cool-client - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expectedError: "all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" - - name: Updating previously existing client - initial: *initConfig - updated: &prevExistingUpdated | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: different-client - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expected: *prevExistingUpdated - - name: Removing a configured client from the status (== component unregister) - initial: *initConfig - updated: &removeFromStatus | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: different-client - - componentNamespace: namespace - componentName: name - clientID: legitclient - status: - oidcClients: - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expected: *removeFromStatus - - name: Simply add a valid client - initial: *initConfig - updated: &addClient | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: different-client - - componentNamespace: namespace - componentName: name - clientID: legitclient - - componentNamespace: namespace2 - componentName: name3 - clientID: justavalidclient - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expected: *addClient - - name: Remove all oidcProviders - initial: *initConfig - updated: &removeFromStatus | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - status: - oidcClients: - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expected: *removeFromStatus diff --git a/vendor/github.com/openshift/api/config/v1/stable.image.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.image.testsuite.yaml deleted file mode 100644 index 6bfbb820f..000000000 --- 
a/vendor/github.com/openshift/api/config/v1/stable.image.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] Image" -crd: 0000_10_config-operator_01_image.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Image - initial: | - apiVersion: config.openshift.io/v1 - kind: Image - spec: {} # No spec is required for a Image - expected: | - apiVersion: config.openshift.io/v1 - kind: Image - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.imagecontentpolicy.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.imagecontentpolicy.testsuite.yaml deleted file mode 100644 index bffdb6bcd..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.imagecontentpolicy.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] ImageContentPolicy" -crd: 0000_10_config-operator_01_imagecontentpolicy.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ImageContentPolicy - initial: | - apiVersion: config.openshift.io/v1 - kind: ImageContentPolicy - spec: {} # No spec is required for a ImageContentPolicy - expected: | - apiVersion: config.openshift.io/v1 - kind: ImageContentPolicy - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.imagedigestmirrorset.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.imagedigestmirrorset.testsuite.yaml deleted file mode 100644 index c25b1696b..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.imagedigestmirrorset.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] ImageDigestMirrorSet" -crd: 0000_10_config-operator_01_imagedigestmirrorset.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ImageDigestMirrorSet - initial: | - apiVersion: config.openshift.io/v1 - kind: ImageDigestMirrorSet - spec: {} # No spec is required for a ImageDigestMirrorSet - expected: | - apiVersion: config.openshift.io/v1 - kind: ImageDigestMirrorSet - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.imagetagmirrorset.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.imagetagmirrorset.testsuite.yaml deleted file mode 100644 index de91eb2c5..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.imagetagmirrorset.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] ImageTagMirrorSet" -crd: 0000_10_config-operator_01_imagetagmirrorset.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ImageTagMirrorSet - initial: | - apiVersion: config.openshift.io/v1 - kind: ImageTagMirrorSet - spec: {} # No spec is required for a ImageTagMirrorSet - expected: | - apiVersion: config.openshift.io/v1 - kind: ImageTagMirrorSet - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.infrastructure.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.infrastructure.testsuite.yaml deleted file mode 100644 index 9d0861b68..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.infrastructure.testsuite.yaml +++ /dev/null @@ -1,1262 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this 
-name: "[Stable] Infrastructure" -crd: 0000_10_config-operator_01_infrastructure-Default.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Infrastructure - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} # No spec is required for a Infrastructure - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - - name: Should be able to pass 2 IP addresses to apiServerInternalIPs in the platform spec - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: - apiServerInternalIPs: - - 192.0.2.1 - - "2001:db8::1" - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: - apiServerInternalIPs: - - 192.0.2.1 - - "2001:db8::1" - - name: Should not be able to pass not-an-IP to apiServerInternalIPs in the platform spec - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: - apiServerInternalIPs: - - not-an-ip-address - expectedError: "Invalid value: \"not-an-ip-address\"" - - name: Should not be able to pass 2 IPv4 addresses to apiServerInternalIPs in the platform spec - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: - apiServerInternalIPs: - - 192.0.2.1 - - 192.0.2.2 - expectedError: "apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" - - name: Should not be able to pass 2 IPv6 addresses to apiServerInternalIPs in the platform spec - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: - apiServerInternalIPs: - - "2001:db8::1" - - "2001:db8::2" - expectedError: "apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" - - name: Should not be able to pass more than 2 entries to apiServerInternalIPs in the platform spec - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: - apiServerInternalIPs: - - 192.0.2.1 - - "2001:db8::1" - - 192.0.2.2 - expectedError: "Too many: 3: must have at most 2 items" - - name: Should be able to pass 2 IP addresses to ingressIPs in the platform spec - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: - ingressIPs: - - 192.0.2.1 - - "2001:db8::1" - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: - ingressIPs: - - 192.0.2.1 - - "2001:db8::1" - - name: Should not be able to pass not-an-IP to ingressIPs in the platform spec - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: - ingressIPs: - - not-an-ip-address - expectedError: "Invalid value: \"not-an-ip-address\"" - - name: Should not be able to pass 2 IPv4 addresses to ingressIPs in the platform spec - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: - ingressIPs: - - 192.0.2.1 - - 192.0.2.2 - expectedError: "ingressIPs must contain at most one IPv4 address and at most one IPv6 address" - - name: Should not be able to pass 2 IPv6 addresses to ingressIPs in the platform spec - initial: | - apiVersion: config.openshift.io/v1 - kind: 
Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: - ingressIPs: - - "2001:db8::1" - - "2001:db8::2" - expectedError: "ingressIPs must contain at most one IPv4 address and at most one IPv6 address" - - name: Should not be able to pass more than 2 entries to ingressIPs in the platform spec - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: - ingressIPs: - - 192.0.2.1 - - "2001:db8::1" - - 192.0.2.2 - expectedError: "Too many: 3: must have at most 2 items" - - name: Should be able to pass 2 IP subnets addresses to machineNetworks in the platform spec - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: - machineNetworks: - - "192.0.2.0/24" - - "2001:db8::0/32" - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: - machineNetworks: - - "192.0.2.0/24" - - "2001:db8::0/32" - - name: Should not be able to pass not-a-CIDR to machineNetworks in the platform spec - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: - machineNetworks: - - 192.0.2.1 - expectedError: "Invalid value: \"192.0.2.1\"" - onUpdate: - - name: Should be able to change External platformName from unknown to something else - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: External - external: - platformName: Unknown - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: External - external: - platformName: M&PCloud - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: External - external: - platformName: M&PCloud - - name: Should not be able to change External platformName once it was set - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: External - external: - platformName: M&PCloud - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: External - external: - platformName: SomeOtherCoolplatformName - expectedError: " spec.platformSpec.external.platformName: Invalid value: \"string\": platform name cannot be changed once set" - - name: Should not be able to modify an existing Azure ResourceTags Tag - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: Azure - platformStatus: - type: Azure - azure: - resourceTags: - - {key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: Azure - platformStatus: - type: Azure - azure: - resourceTags: - - {key: "key", value: "changed"} - expectedStatusError: "status.platformStatus.azure.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" - - name: Should not be able to add a Tag to an existing Azure ResourceTags - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: Azure - platformStatus: - type: Azure - azure: - resourceTags: - - {key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - 
kind: Infrastructure - spec: {} - status: - platform: Azure - platformStatus: - type: Azure - azure: - resourceTags: - - {key: "key", value: "value"} - - {key: "new", value: "entry"} - expectedStatusError: "status.platformStatus.azure.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" - - name: Should not be able to remove a Tag from an existing Azure ResourceTags - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: Azure - platformStatus: - type: Azure - azure: - resourceTags: - - {key: "key", value: "value"} - - {key: "new", value: "entry"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: Azure - platformStatus: - type: Azure - azure: - resourceTags: - - {key: "key", value: "value"} - expectedStatusError: "status.platformStatus.azure.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" - - name: Should not be able to add Azure ResourceTags to an empty platformStatus.azure - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: Azure - platformStatus: - type: Azure - azure: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: Azure - platformStatus: - azure: - resourceTags: - - {key: "key", value: "value"} - expectedStatusError: "status.platformStatus.azure: Invalid value: \"object\": resourceTags may only be configured during installation" - - name: Should not be able to remove Azure ResourceTags from platformStatus.azure - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: Azure - platformStatus: - type: Azure - azure: - resourceTags: - - {key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: Azure - platformStatus: - type: Azure - azure: {} - expectedStatusError: "status.platformStatus.azure: Invalid value: \"object\": resourceTags may only be configured during installation" - - name: Should be able to modify the ResourceGroupName while Azure ResourceTags are present - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: Azure - platformStatus: - type: Azure - azure: - resourceGroupName: foo - resourceTags: - - {key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: Azure - platformStatus: - azure: - resourceGroupName: bar - resourceTags: - - {key: "key", value: "value"} - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - cpuPartitioning: None - platform: Azure - platformStatus: - azure: - resourceGroupName: bar - resourceTags: - - {key: "key", value: "value"} - - name: PowerVS platform status's resourceGroup length should not exceed the max length set - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: PowerVS - status: - platform: PowerVS - platformStatus: - powervs: - resourceGroup: resource-group - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: PowerVS - status: - platform: PowerVS - platformStatus: - powervs: - resourceGroup: 
resource-group-should-not-accept-the-string-that-exceeds-max-length-set - expectedStatusError: "status.platformStatus.powervs.resourceGroup: Too long: may not be longer than 40" - - name: PowerVS platform status's resourceGroup should match the regex configured - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: PowerVS - status: - platform: PowerVS - platformStatus: - powervs: - resourceGroup: resource-group - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: PowerVS - status: - platform: PowerVS - platformStatus: - powervs: - resourceGroup: re$ource-group - expectedStatusError: "status.platformStatus.powervs.resourceGroup in body should match '^[a-zA-Z0-9-_ ]+$'" - - name: Should not be able to change PowerVS platform status's resourceGroup once it was set - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: PowerVS - status: - platform: PowerVS - platformStatus: - powervs: - resourceGroup: resource-group - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: PowerVS - status: - platform: PowerVS - platformStatus: - powervs: - resourceGroup: other-resource-group-name - expectedStatusError: "status.platformStatus.powervs.resourceGroup: Invalid value: \"string\": resourceGroup is immutable once set" - - name: Should not be able to unset PowerVS platform status's resourceGroup once it was set - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: PowerVS - status: - platform: PowerVS - platformStatus: - powervs: - region: some-region - resourceGroup: resource-group - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: PowerVS - status: - platform: PowerVS - platformStatus: - powervs: - region: some-region - expectedStatusError: "status.platformStatus.powervs: Invalid value: \"object\": cannot unset resourceGroup once set" - - name: Should set load balancer type to OpenShiftManagedDefault if not specified - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - openstack: {} - type: OpenStack - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - openstack: {} - type: OpenStack - status: - platform: OpenStack - platformStatus: - openstack: {} - type: OpenStack - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - openstack: {} - type: OpenStack - status: - controlPlaneTopology: HighlyAvailable - cpuPartitioning: None - infrastructureTopology: HighlyAvailable - platform: OpenStack - platformStatus: - openstack: - loadBalancer: - type: OpenShiftManagedDefault - type: OpenStack - - name: Should be able to override the default load balancer with a valid value - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - openstack: {} - type: OpenStack - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - openstack: {} - type: OpenStack - status: - platform: OpenStack - platformStatus: - openstack: - loadBalancer: - type: UserManaged - type: OpenStack - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - openstack: {} - type: OpenStack - status: - controlPlaneTopology: HighlyAvailable - cpuPartitioning: None - infrastructureTopology: 
HighlyAvailable - platform: OpenStack - platformStatus: - openstack: - loadBalancer: - type: UserManaged - type: OpenStack - - name: Should not allow changing the immutable load balancer type field - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - openstack: {} - type: OpenStack - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: OpenStack - platformStatus: - openstack: - loadBalancer: - type: OpenShiftManagedDefault - type: OpenStack - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: OpenStack - openstack: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: OpenStack - platformStatus: - openstack: - loadBalancer: - type: UserManaged - type: OpenStack - expectedStatusError: "status.platformStatus.openstack.loadBalancer.type: Invalid value: \"string\": type is immutable once set" - - name: Should not allow removing the immutable load balancer type field that was initially set - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - openstack: {} - type: OpenStack - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: OpenStack - platformStatus: - openstack: - loadBalancer: - type: UserManaged - type: OpenStack - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: OpenStack - openstack: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: OpenStack - platformStatus: - openstack: {} - type: OpenStack - expectedStatusError: "status.platformStatus.openstack.loadBalancer.type: Invalid value: \"string\": type is immutable once set" - - name: Should not allow setting the load balancer type to a wrong value - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - openstack: {} - type: OpenStack - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - openstack: {} - type: OpenStack - status: - platform: OpenStack - platformStatus: - openstack: - loadBalancer: - type: FooBar - type: OpenStack - expectedStatusError: "status.platformStatus.openstack.loadBalancer.type: Unsupported value: \"FooBar\": supported values: \"OpenShiftManagedDefault\", \"UserManaged\"" - - name: Should not be able to update cloudControllerManager state to empty string when state is already set to None - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: None - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platformStatus: - external: - cloudControllerManager: - state: "" - expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set" - - name: Should not be able to update cloudControllerManager state to External when state is already set to None - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: None - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: 
External - external: - cloudControllerManager: - state: External - expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set" - - name: Should be able to update cloudControllerManager state to None when state is already set to None - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: None - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: None - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - cpuPartitioning: None - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: None - - name: Should not be able to unset cloudControllerManager state when state is already set to None - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: None - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: {} - expectedStatusError: " status.platformStatus.external.cloudControllerManager: Invalid value: \"object\": state may not be added or removed once set" - - name: Should not be able to update cloudControllerManager state to empty string when state is already set to External - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: External - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: "" - expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set" - - name: Should not be able to update cloudControllerManager state to None when state is already set to External - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: External - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: None - expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set" - - name: Should be able to update cloudControllerManager state to External when state is already set to External - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: External - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: External - expected: | - 
apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - cpuPartitioning: None - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: External - - name: Should not be able to unset cloudControllerManager state when state is already set to External - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: External - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: {} - expectedStatusError: " status.platformStatus.external.cloudControllerManager: Invalid value: \"object\": state may not be added or removed once set" - - name: Should not be able to update cloudControllerManager state to None when state is already set to empty string - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: "" - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: None - expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set" - - name: Should not be able to update cloudControllerManager state to External when state is already set to empty string - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: "" - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: External - expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set" - - name: Should be able to update cloudControllerManager state to empty string when state is already set to empty string - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: "" - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: "" - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - cpuPartitioning: None - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: "" - - name: Should not be able to unset cloudControllerManager state when state is already set to empty string - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: "" - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - 
type: External - external: - cloudControllerManager: {} - expectedStatusError: " status.platformStatus.external.cloudControllerManager: Invalid value: \"object\": state may not be added or removed once set" - - name: Should be able to update cloudControllerManager state to None when cloudControllerManager state is unset - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: None - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - cpuPartitioning: None - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: None - - name: Should be able to update cloudControllerManager state to empty string when cloudControllerManager state is unset - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: "" - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - cpuPartitioning: None - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: "" - - name: Should not be able to update cloudControllerManager state to External when cloudControllerManager state is unset - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: External - expectedStatusError: " status.platformStatus.external.cloudControllerManager: Invalid value: \"object\": state may not be added or removed once set" - - name: Should be able to unset cloudControllerManager state when cloudControllerManager state is unset - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: {} - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - cpuPartitioning: None - platform: External - platformStatus: - type: External - external: - cloudControllerManager: {} - - name: Should not be able to add cloudControllerManager when cloudControllerManager is unset - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - 
external: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: External - expectedStatusError: " status.platformStatus.external: Invalid value: \"object\": cloudControllerManager may not be added or removed once set" - - name: Should not be able to remove cloudControllerManager when cloudControllerManager is set - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: - cloudControllerManager: - state: External - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: External - platformStatus: - type: External - external: {} - expectedStatusError: " status.platformStatus.external: Invalid value: \"object\": cloudControllerManager may not be added or removed once set" - - name: Should be able to add valid (URL) ServiceEndpoints to IBMCloud PlatformStatus - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: IBMCloud - platformStatus: - type: IBMCloud - ibmcloud: - serviceEndpoints: [] - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: IBMCloud - platformStatus: - type: IBMCloud - ibmcloud: - serviceEndpoints: - - name: VPC - url: https://dummy.vpc.com - - name: COS - url: https://dummy.cos.com - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: HighlyAvailable - cpuPartitioning: None - infrastructureTopology: HighlyAvailable - platform: IBMCloud - platformStatus: - type: IBMCloud - ibmcloud: - serviceEndpoints: - - name: VPC - url: https://dummy.vpc.com - - name: COS - url: https://dummy.cos.com - - name: Should not be able to add empty (URL) ServiceEndpoints to IBMCloud PlatformStatus - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: IBMCloud - platformStatus: - type: IBMCloud - ibmcloud: - serviceEndpoints: [] - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: IBMCloud - platformStatus: - type: IBMCloud - ibmcloud: - serviceEndpoints: - - name: COS - url: " " - expectedStatusError: " status.platformStatus.ibmcloud.serviceEndpoints[0].url: Invalid value: \"string\": url must be a valid absolute URL" - - name: Should not be able to add invalid (URL) ServiceEndpoints to IBMCloud PlatformStatus - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: IBMCloud - platformStatus: - type: IBMCloud - ibmcloud: - serviceEndpoints: [] - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: IBMCloud - platformStatus: - type: IBMCloud - ibmcloud: - serviceEndpoints: - - name: VPC - url: https://dummy.vpc.com - - name: COS - url: dummy-cos-com - expectedStatusError: " status.platformStatus.ibmcloud.serviceEndpoints[1].url: Invalid value: \"string\": url must be a valid absolute URL" - - name: Should not be able to add invalid (Name) ServiceEndpoints to IBMCloud PlatformStatus - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: IBMCloud - platformStatus: - type: IBMCloud - ibmcloud: - serviceEndpoints: [] - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - 
spec: {} - status: - platform: IBMCloud - platformStatus: - type: IBMCloud - ibmcloud: - serviceEndpoints: - - name: VPC - url: https://dummy.vpc.com - - name: BadService - url: https://bad-service.com - expectedStatusError: " status.platformStatus.ibmcloud.serviceEndpoints[1].name: Unsupported value: \"BadService\": supported values: \"CIS\", \"COS\", \"DNSServices\", \"GlobalSearch\", \"GlobalTagging\", \"HyperProtect\", \"IAM\", \"KeyProtect\", \"ResourceController\", \"ResourceManager\", \"VPC\"" diff --git a/vendor/github.com/openshift/api/config/v1/stable.ingress.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.ingress.testsuite.yaml deleted file mode 100644 index 90d48e896..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.ingress.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] Ingress" -crd: 0000_10_config-operator_01_ingress.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Ingress - initial: | - apiVersion: config.openshift.io/v1 - kind: Ingress - spec: {} # No spec is required for a Ingress - expected: | - apiVersion: config.openshift.io/v1 - kind: Ingress - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.network.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.network.testsuite.yaml deleted file mode 100644 index c85d122a6..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.network.testsuite.yaml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] Network" -crd: 0000_10_config-operator_01_network-Default.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Network - initial: | - apiVersion: config.openshift.io/v1 - kind: Network - spec: {} # No spec is required for a Network - expected: | - apiVersion: config.openshift.io/v1 - kind: Network - spec: {} - - name: Should be able to set status conditions - initial: | - apiVersion: config.openshift.io/v1 - kind: Network - spec: {} # No spec is required for a Network - status: - conditions: - - type: NetworkTypeMigrationInProgress - status: "False" - reason: "Reason" - message: "Message" - lastTransitionTime: "2023-10-25T12:00:00Z" - expected: | - apiVersion: config.openshift.io/v1 - kind: Network - spec: {} - status: - conditions: - - type: NetworkTypeMigrationInProgress - status: "False" - reason: "Reason" - message: "Message" - lastTransitionTime: "2023-10-25T12:00:00Z" diff --git a/vendor/github.com/openshift/api/config/v1/stable.node.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.node.testsuite.yaml deleted file mode 100644 index d6502600b..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.node.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] Node" -crd: 0000_10_config-operator_01_node.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Node - initial: | - apiVersion: config.openshift.io/v1 - kind: Node - spec: {} # No spec is required for a Node - expected: | - apiVersion: config.openshift.io/v1 - kind: Node - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.oauth.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.oauth.testsuite.yaml deleted file mode 100644 index d33d2bc1b..000000000 --- 
a/vendor/github.com/openshift/api/config/v1/stable.oauth.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] OAuth" -crd: 0000_10_config-operator_01_oauth.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal OAuth - initial: | - apiVersion: config.openshift.io/v1 - kind: OAuth - spec: {} # No spec is required for a OAuth - expected: | - apiVersion: config.openshift.io/v1 - kind: OAuth - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.operatorhub.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.operatorhub.testsuite.yaml deleted file mode 100644 index 9dd7a4c6d..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.operatorhub.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] OperatorHub" -crd: 0000_03_marketplace-operator_01_operatorhub.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal OperatorHub - initial: | - apiVersion: config.openshift.io/v1 - kind: OperatorHub - spec: {} # No spec is required for a OperatorHub - expected: | - apiVersion: config.openshift.io/v1 - kind: OperatorHub - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.project.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.project.testsuite.yaml deleted file mode 100644 index 0144ad32f..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.project.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] Project" -crd: 0000_10_config-operator_01_project.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Project - initial: | - apiVersion: config.openshift.io/v1 - kind: Project - spec: {} # No spec is required for a Project - expected: | - apiVersion: config.openshift.io/v1 - kind: Project - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.proxy.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.proxy.testsuite.yaml deleted file mode 100644 index d49b83247..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.proxy.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] Proxy" -crd: 0000_03_config-operator_01_proxy.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Proxy - initial: | - apiVersion: config.openshift.io/v1 - kind: Proxy - spec: {} # No spec is required for a Proxy - expected: | - apiVersion: config.openshift.io/v1 - kind: Proxy - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/stable.scheduler.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.scheduler.testsuite.yaml deleted file mode 100644 index d65965482..000000000 --- a/vendor/github.com/openshift/api/config/v1/stable.scheduler.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] Scheduler" -crd: 0000_10_config-operator_01_scheduler-Default.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Scheduler - initial: | - apiVersion: config.openshift.io/v1 - kind: Scheduler - spec: {} # No spec is required for a Scheduler - expected: | - 
apiVersion: config.openshift.io/v1 - kind: Scheduler - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.apiserver.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.apiserver.testsuite.yaml deleted file mode 100644 index 74aa92b47..000000000 --- a/vendor/github.com/openshift/api/config/v1/techpreview.apiserver.testsuite.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[TechPreviewNoUpgrade] APIServer" -crd: 0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create encrypt with aescbc - initial: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - encryption: - type: aescbc - expected: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - audit: - profile: Default - encryption: - type: aescbc - - name: Should be able to create encrypt with aesgcm - initial: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - encryption: - type: aesgcm - expected: | - apiVersion: config.openshift.io/v1 - kind: APIServer - spec: - audit: - profile: Default - encryption: - type: aesgcm diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.authentication.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.authentication.testsuite.yaml deleted file mode 100644 index 9d978fcf5..000000000 --- a/vendor/github.com/openshift/api/config/v1/techpreview.authentication.testsuite.yaml +++ /dev/null @@ -1,298 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[TechPreviewNoUpgrade] Authentication" -crd: 0000_10_config-operator_01_authentication.crd-TechPreviewNoUpgrade.yaml -tests: - onCreate: - - name: Should be able to create a minimal Authentication - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: {} # No spec is required for a Authentication - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: {} - - name: Should be able to use the OIDC type - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - - name: Cannot set username claim prefix with policy NoPrefix - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: NoPrefix - prefix: - prefixString: "myoidc:" - expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" - - name: Can set username claim prefix with policy Prefix - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: Prefix - prefix: - prefixString: "myoidc:" - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: Prefix - prefix: - prefixString: "myoidc:" - - name: Cannot leave username claim prefix 
blank with policy Prefix - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: Prefix - expectedError: "prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" - - name: Can set OIDC providers with no username prefixing - initial: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: NoPrefix - expected: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - claimMappings: - username: - claim: "preferred_username" - prefixPolicy: NoPrefix - onUpdate: - - name: Updating OIDC provider with a client that's not in the status - initial: &initConfig | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: someclient - - componentNamespace: namespace - componentName: name - clientID: legitclient - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - updated: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: someclient - - componentNamespace: namespace - componentName: name - clientID: legitclient - - componentNamespace: dif-namespace # new client here - componentName: tehName - clientID: cool-client - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expectedError: "all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" - - name: Updating OIDC provider with a client that's different from the previous one - initial: *initConfig - updated: | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: dif-namespace - componentName: tehName - clientID: cool-client - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expectedError: "all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" - - name: Updating previously existing client - initial: *initConfig - updated: &prevExistingUpdated | - apiVersion: config.openshift.io/v1 - kind: Authentication - 
spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: different-client - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expected: *prevExistingUpdated - - name: Removing a configured client from the status (== component unregister) - initial: *initConfig - updated: &removeFromStatus | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: different-client - - componentNamespace: namespace - componentName: name - clientID: legitclient - status: - oidcClients: - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expected: *removeFromStatus - - name: Simply add a valid client - initial: *initConfig - updated: &addClient | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - oidcProviders: - - name: myoidc - issuer: - issuerURL: https://meh.tld - audiences: ['openshift-aud'] - oidcClients: - - componentNamespace: namespace - componentName: preexisting - clientID: different-client - - componentNamespace: namespace - componentName: name - clientID: legitclient - - componentNamespace: namespace2 - componentName: name3 - clientID: justavalidclient - status: - oidcClients: - - componentNamespace: namespace - componentName: name - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expected: *addClient - - name: Remove all oidcProviders - initial: *initConfig - updated: &removeFromStatus | - apiVersion: config.openshift.io/v1 - kind: Authentication - spec: - type: OIDC - status: - oidcClients: - - componentNamespace: namespace2 - componentName: name2 - - componentNamespace: namespace2 - componentName: name3 - expected: *removeFromStatus diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.clusterversion.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.clusterversion.testsuite.yaml deleted file mode 100644 index 71988108e..000000000 --- a/vendor/github.com/openshift/api/config/v1/techpreview.clusterversion.testsuite.yaml +++ /dev/null @@ -1,472 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[TechPreviewNoUpgrade] ClusterVersion" -crd: 0000_00_cluster-version-operator_01_clusterversion-TechPreviewNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ClusterVersion - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - - name: Should allow image to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - image: bar - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - image: bar - - name: Should allow version to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - 
desiredUpdate: - version: 4.11.1 - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - version: 4.11.1 - - name: Should allow architecture to be empty - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: "" - version: 4.11.1 - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: "" - version: 4.11.1 - - name: Should allow architecture and version to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - - name: Version must be set if architecture is set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - expectedError: "Version must be set if Architecture is set" - - name: Should not allow image and architecture to be set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - image: bar - expectedError: "cannot set both Architecture and Image" - - name: Should be able to create a ClusterVersion with base capability None, and additional capabilities baremetal and MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - - name: Should not be able to create a ClusterVersion with base capability None, and additional capabilities baremetal without MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - expectedError: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability - - name: Should be able to create a ClusterVersion with base capability None, and additional capabilities marketplace and OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - - name: Should not be able to create a ClusterVersion with base capability None, and additional capabilities marketplace without OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - expectedError: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither 
explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability - - name: Should be able to set a custom signature store - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: - - url: "https://osus.ocp.com" - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: - - url: "https://osus.ocp.com" - - name: Should be able to set multiple custom signature store - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: - - url: "https://osus1.ocp.com" - - url: "https://osus2.ocp.com" - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: - - url: "https://osus1.ocp.com" - - url: "https://osus2.ocp.com" - - name: Invalid custom signature store should throw error - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: - - url: "osus1.ocp.com" - expectedError: "url must be a valid absolute URL" - - name: Should be able to unset the signature stores - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: [] - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - signatureStores: [] - onUpdate: - - name: Should not allow image to be set if architecture set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - image: bar - expectedError: "cannot set both Architecture and Image" - - name: Should not allow architecture to be set if image set - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - image: bar - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - desiredUpdate: - architecture: Multi - version: 4.11.1 - image: bar - expectedError: "cannot set both Architecture and Image" - - name: Should be able to add the baremetal capability with a ClusterVersion with base capability None, and implicitly enabled MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - MachineAPI - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - MachineAPI - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - 
enabledCapabilities: - - MachineAPI - - name: Should be able to add the baremetal capability with a ClusterVersion with base capability None, with the Machine API capability - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - - MachineAPI - - name: Should not be able to add the baremetal capability with a ClusterVersion with base capability None, and without MachineAPI - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - baremetal - expectedError: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability - - name: Should be able to add the marketplace capability with a ClusterVersion with base capability None, and implicitly enabled OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - OperatorLifecycleManager - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - OperatorLifecycleManager - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - status: - desired: - version: foo - image: foo - observedGeneration: 1 - versionHash: foo - availableUpdates: - - version: foo - image: foo - capabilities: - enabledCapabilities: - - OperatorLifecycleManager - - name: Should be able to add the marketplace capability with a ClusterVersion with base capability None, with the OperatorLifecycleManager capability - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - expected: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - - OperatorLifecycleManager - - name: Should not be able to add the marketplace capability with a ClusterVersion with base capability 
None, and without OperatorLifecycleManager - initial: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - updated: | - apiVersion: config.openshift.io/v1 - kind: ClusterVersion - spec: - clusterID: foo - capabilities: - baselineCapabilitySet: None - additionalEnabledCapabilities: - - marketplace - expectedError: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.dns.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.dns.testsuite.yaml deleted file mode 100644 index ec64352e3..000000000 --- a/vendor/github.com/openshift/api/config/v1/techpreview.dns.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[TechPreview] DNS" -crd: 0000_10_config-operator_01_dns-TechPreviewNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal DNS - initial: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: {} # No spec is required for a DNS - expected: | - apiVersion: config.openshift.io/v1 - kind: DNS - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.infrastructure.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.infrastructure.testsuite.yaml deleted file mode 100644 index d4a1113f0..000000000 --- a/vendor/github.com/openshift/api/config/v1/techpreview.infrastructure.testsuite.yaml +++ /dev/null @@ -1,749 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[TechPreviewNoUpgrade] Infrastructure" -crd: 0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Infrastructure - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} # No spec is required for a Infrastructure - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - onUpdate: - - name: Status Should contain default fields - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: {} - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - cpuPartitioning: None - infrastructureTopology: HighlyAvailable - controlPlaneTopology: HighlyAvailable - - name: Status update cpuPartitioning should fail validation check - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - cpuPartitioning: None - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - cpuPartitioning: "Invalid" - expectedStatusError: 'status.cpuPartitioning: Unsupported value: "Invalid": supported values: "None", "AllNodes"' - - name: Should set load balancer type to OpenShiftManagedDefault if not specified - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - baremetal: {} - type: BareMetal - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - baremetal: {} - type: BareMetal - status: - platform: BareMetal - platformStatus: - baremetal: {} - type: BareMetal - 
expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - baremetal: {} - type: BareMetal - status: - controlPlaneTopology: HighlyAvailable - cpuPartitioning: None - infrastructureTopology: HighlyAvailable - platform: BareMetal - platformStatus: - baremetal: - loadBalancer: - type: OpenShiftManagedDefault - type: BareMetal - - name: Should be able to override the default load balancer with a valid value - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - baremetal: {} - type: BareMetal - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - baremetal: {} - type: BareMetal - status: - platform: BareMetal - platformStatus: - baremetal: - loadBalancer: - type: UserManaged - type: BareMetal - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - baremetal: {} - type: BareMetal - status: - controlPlaneTopology: HighlyAvailable - cpuPartitioning: None - infrastructureTopology: HighlyAvailable - platform: BareMetal - platformStatus: - baremetal: - loadBalancer: - type: UserManaged - type: BareMetal - - name: Should not allow changing the immutable load balancer type field - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - baremetal: {} - type: BareMetal - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: BareMetal - platformStatus: - baremetal: - loadBalancer: - type: OpenShiftManagedDefault - type: BareMetal - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: BareMetal - platformStatus: - baremetal: - loadBalancer: - type: UserManaged - type: BareMetal - expectedStatusError: "status.platformStatus.baremetal.loadBalancer.type: Invalid value: \"string\": type is immutable once set" - - name: Should not allow removing the immutable load balancer type field that was initially set - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - baremetal: {} - type: BareMetal - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: BareMetal - platformStatus: - baremetal: - loadBalancer: - type: UserManaged - type: BareMetal - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: BareMetal - baremetal: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: BareMetal - platformStatus: - baremetal: {} - type: BareMetal - expectedStatusError: "status.platformStatus.baremetal.loadBalancer.type: Invalid value: \"string\": type is immutable once set" - - name: Should not allow setting the load balancer type to a wrong value - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - baremetal: {} - type: BareMetal - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - baremetal: {} - type: BareMetal - status: - platform: BareMetal - platformStatus: - baremetal: - loadBalancer: - type: FooBar - type: BareMetal - expectedStatusError: "status.platformStatus.baremetal.loadBalancer.type: Unsupported value: \"FooBar\": supported values: \"OpenShiftManagedDefault\", \"UserManaged\"" - - name: Should not 
be able to modify an existing GCP ResourceLabels Label - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "changed"} - expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation" - - name: Should not be able to add a Label to an existing GCP ResourceLabels - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - - {key: "new", value: "entry"} - expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation" - - name: Should not be able to remove a Label from an existing GCP ResourceLabels - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - - {key: "new", value: "entry"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation" - - name: Should not be able to add GCP ResourceLabels to an empty platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - gcp: - resourceLabels: - - {key: "key", value: "value"} - expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceLabels may only be configured during installation" - - name: Should not be able to remove GCP ResourceLabels from platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: {} - expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceLabels may only be configured during installation" - - name: Should not have label key start with openshift-io for GCP ResourceLabels in platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - 
spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - - {key: "openshift-io-created-cluster", value: "true"} - expectedStatusError: "status.platformStatus.gcp.resourceLabels[1].key: Invalid value: \"string\": label keys must not start with either `openshift-io` or `kubernetes-io`" - - name: Should not have label key start with kubernetes-io for GCP ResourceLabels in platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceLabels: - - {key: "key", value: "value"} - - {key: "kubernetes-io-created-cluster", value: "true"} - expectedStatusError: "status.platformStatus.gcp.resourceLabels[1].key: Invalid value: \"string\": label keys must not start with either `openshift-io` or `kubernetes-io`" - - name: Should not be able to modify an existing GCP ResourceTags Tag - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "changed"} - expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" - - name: Should not be able to add a Tag to an existing GCP ResourceTags - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - - {parentID: "test-project-123", key: "new", value: "tag"} - expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" - - name: Should not be able to remove a Tag from an existing GCP ResourceTags - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key1", value: "value1"} - - {parentID: "test-project-123", key: "key2", value: "value2"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key1", value: "value1"} - expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" - - name: Should not be able to add GCP ResourceTags to an empty platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - 
gcp: {} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceTags may only be configured during installation" - - name: Should not be able to remove GCP ResourceTags from platformStatus.gcp - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: {} - expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceTags may only be configured during installation" - - name: Should not be able to modify ParentID of a Tag in the GCP ResourceTags - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - controlPlaneTopology: "HighlyAvailable" - infrastructureTopology: "HighlyAvailable" - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "1234567890", key: "key", value: "value"} - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: {} - status: - platform: GCP - platformStatus: - type: GCP - gcp: - resourceTags: - - {parentID: "test-project-123", key: "key", value: "value"} - expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" - - name: dnsType should default to `PlatformDefault` when not specified - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - gcp: {} - type: GCP - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: GCP - platformStatus: - gcp: {} - type: GCP - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: GCP - gcp: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: GCP - platformStatus: - gcp: {} - type: GCP - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: GCP - gcp: {} - status: - controlPlaneTopology: HighlyAvailable - cpuPartitioning: None - infrastructureTopology: HighlyAvailable - platform: GCP - platformStatus: - gcp: - cloudLoadBalancerConfig: - dnsType: PlatformDefault - type: GCP - - name: should be able to set dnsType to non-default value of `ClusterHosted` - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - gcp: {} - type: GCP - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: GCP - gcp: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: GCP - platformStatus: - gcp: - cloudLoadBalancerConfig: - dnsType: ClusterHosted - type: GCP - expected: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: GCP - gcp: {} - status: - controlPlaneTopology: HighlyAvailable - cpuPartitioning: None - infrastructureTopology: HighlyAvailable - platform: GCP - platformStatus: - gcp: - cloudLoadBalancerConfig: - dnsType: ClusterHosted - type: GCP - - name: Should not allow 
changing the immutable dnsType field - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - gcp: {} - type: GCP - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: GCP - platformStatus: - gcp: - cloudLoadBalancerConfig: - dnsType: ClusterHosted - clusterHosted: - apiIntLoadBalancerIPs: - - 10.10.10.20 - type: GCP - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: GCP - gcp: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: GCP - platformStatus: - gcp: - cloudLoadBalancerConfig: - dnsType: PlatformDefault - type: GCP - expectedStatusError: "status.platformStatus.gcp.cloudLoadBalancerConfig.dnsType: Invalid value: \"string\": dnsType is immutable" - - name: Should not accept non-IP address values for Load Balancer IPs - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - gcp: {} - type: GCP - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: GCP - platformStatus: - gcp: - cloudLoadBalancerConfig: - dnsType: ClusterHosted - clusterHosted: - apiIntLoadBalancerIPs: - - 10.10.10.20 - type: GCP - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: GCP - gcp: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: GCP - platformStatus: - gcp: - cloudLoadBalancerConfig: - dnsType: ClusterHosted - clusterHosted: - apiIntLoadBalancerIPs: - - 10.10.10.20 - - not-an-ip-address - type: GCP - expectedStatusError: "status.platformStatus.gcp.cloudLoadBalancerConfig.clusterHosted.apiIntLoadBalancerIPs[1]: Invalid value: \"not-an-ip-address\": status.platformStatus.gcp.cloudLoadBalancerConfig.clusterHosted.apiIntLoadBalancerIPs[1] in body should match '(^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*)'" - - name: Should not accept update when `clusterHosted` is specified with DNSType `PlatformDefault` - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - gcp: {} - type: GCP - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: GCP - platformStatus: - gcp: {} - type: GCP - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - 
platformSpec: - type: GCP - gcp: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: GCP - platformStatus: - gcp: - cloudLoadBalancerConfig: - dnsType: PlatformDefault - clusterHosted: - apiIntLoadBalancerIPs: - - 10.10.10.20 - type: GCP - expectedStatusError: "status.platformStatus.gcp.cloudLoadBalancerConfig: Invalid value: \"object\": clusterHosted is permitted only when dnsType is ClusterHosted" - - name: Should not accept duplicate IP addresses for any of the Load Balancer IPs - initial: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - gcp: {} - type: GCP - updated: | - apiVersion: config.openshift.io/v1 - kind: Infrastructure - spec: - platformSpec: - type: GCP - gcp: {} - status: - controlPlaneTopology: HighlyAvailable - infrastructureTopology: HighlyAvailable - platform: GCP - platformStatus: - gcp: - cloudLoadBalancerConfig: - dnsType: ClusterHosted - clusterHosted: - apiIntLoadBalancerIPs: - - 10.10.10.20 - - 10.10.20.20 - - 10.10.10.20 - type: GCP - expectedStatusError: "status.platformStatus.gcp.cloudLoadBalancerConfig.clusterHosted.apiIntLoadBalancerIPs[2]: Duplicate value: \"10.10.10.20\"" diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.network.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.network.testsuite.yaml deleted file mode 100644 index d15fae3a9..000000000 --- a/vendor/github.com/openshift/api/config/v1/techpreview.network.testsuite.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[TechPreviewNoUpgrade] Network" -crd: 0000_10_config-operator_01_network-TechPreviewNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to set status conditions - initial: | - apiVersion: config.openshift.io/v1 - kind: Network - spec: {} # No spec is required for a Network - status: - conditions: - - type: NetworkTypeMigrationInProgress - status: "False" - reason: "Reason" - message: "Message" - lastTransitionTime: "2023-10-25T12:00:00Z" - expected: | - apiVersion: config.openshift.io/v1 - kind: Network - spec: {} - status: - conditions: - - type: NetworkTypeMigrationInProgress - status: "False" - reason: "Reason" - message: "Message" - lastTransitionTime: "2023-10-25T12:00:00Z" diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.scheduler.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.scheduler.testsuite.yaml deleted file mode 100644 index 5b5eb8954..000000000 --- a/vendor/github.com/openshift/api/config/v1/techpreview.scheduler.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] Scheduler" -crd: 0000_10_config-operator_01_scheduler-TechPreviewNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal Scheduler - initial: | - apiVersion: config.openshift.io/v1 - kind: Scheduler - spec: {} # No spec is required for a Scheduler - expected: | - apiVersion: config.openshift.io/v1 - kind: Scheduler - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index 59b89388b..bdae46689 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -14,6 +14,11 @@ import ( // // Compatibility level 1: Stable within a major 
release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=apiservers,scope=Cluster +// +kubebuilder:subresource:status type APIServer struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go index 62c9e7f5a..b3dfa61b5 100644 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -4,15 +4,19 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient // +genclient:nonNamespaced -// +kubebuilder:subresource:status // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +openshift:validation:FeatureSetAwareXValidation:featureSet=CustomNoUpgrade;TechPreviewNoUpgrade,rule="!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))",message="all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDC,rule="!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))",message="all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" // Authentication specifies cluster-wide settings for authentication (like OAuth and // webhook token authenticators). The canonical name of an instance is `cluster`. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
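The types_authentication.go hunk above is representative of the mechanical change running through this vendor bump: markers that scoped a validation or field to whole feature sets (featureSet=CustomNoUpgrade;TechPreviewNoUpgrade) now reference one named gate (here, ExternalOIDC), so the rule travels with that gate rather than with a hard-coded list of feature sets. A minimal sketch of the new marker shape on a hypothetical type; ExampleGate, Example, and widgets are illustrative names, not part of this API:

package example

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// The XValidation below is intended to be rendered into the generated CRD
// only for cluster profiles where ExampleGate is enabled.
// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExampleGate,rule="!has(self.spec.widgets) || size(self.spec.widgets) <= 20",message="no more than 20 widgets may be configured"
type Example struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec ExampleSpec `json:"spec"`
}

type ExampleSpec struct {
	// widgets is only served when ExampleGate is enabled.
	// +openshift:enable:FeatureGate=ExampleGate
	// +optional
	Widgets []string `json:"widgets,omitempty"`
}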
// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=authentications,scope=Cluster +// +kubebuilder:subresource:status type Authentication struct { metav1.TypeMeta `json:",inline"` @@ -85,7 +89,7 @@ type AuthenticationSpec struct { // +listType=map // +listMapKey=name // +kubebuilder:validation:MaxItems=1 - // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +openshift:enable:FeatureGate=ExternalOIDC OIDCProviders []OIDCProvider `json:"oidcProviders,omitempty"` } @@ -112,7 +116,7 @@ type AuthenticationStatus struct { // +listMapKey=componentNamespace // +listMapKey=componentName // +kubebuilder:validation:MaxItems=20 - // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +openshift:enable:FeatureGate=ExternalOIDC OIDCClients []OIDCClientStatus `json:"oidcClients"` } @@ -130,8 +134,8 @@ type AuthenticationList struct { Items []Authentication `json:"items"` } -// +openshift:validation:FeatureSetAwareEnum:featureSet=Default,enum="";None;IntegratedOAuth -// +openshift:validation:FeatureSetAwareEnum:featureSet=CustomNoUpgrade;TechPreviewNoUpgrade,enum="";None;IntegratedOAuth;OIDC +// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="";None;IntegratedOAuth +// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDC,enum="";None;IntegratedOAuth;OIDC type AuthenticationType string const ( diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go index e9aef0375..dad47666d 100644 --- a/vendor/github.com/openshift/api/config/v1/types_build.go +++ b/vendor/github.com/openshift/api/config/v1/types_build.go @@ -16,6 +16,12 @@ import ( // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=openshift-controller-manager,operatorOrdering=01 +// +openshift:capability=Build +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=builds,scope=Cluster +// +kubebuilder:subresource:status type Build struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go index 78666bb1e..7951762cc 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go @@ -15,6 +15,17 @@ import ( // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/497 +// +openshift:file-pattern=cvoRunLevel=0000_00,operatorName=cluster-version-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=clusteroperators,scope=Cluster,shortName=co +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name=Version,JSONPath=.status.versions[?(@.name=="operator")].version,type=string,description=The version the operator is at. 
+// +kubebuilder:printcolumn:name=Available,JSONPath=.status.conditions[?(@.type=="Available")].status,type=string,description=Whether the operator is running and stable. +// +kubebuilder:printcolumn:name=Progressing,JSONPath=.status.conditions[?(@.type=="Progressing")].status,type=string,description=Whether the operator is processing changes. +// +kubebuilder:printcolumn:name=Degraded,JSONPath=.status.conditions[?(@.type=="Degraded")].status,type=string,description=Whether the operator is degraded. +// +kubebuilder:printcolumn:name=Since,JSONPath=.status.conditions[?(@.type=="Available")].lastTransitionTime,type=date,description=The time the operator's Available status last changed. +// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true type ClusterOperator struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index dc913b75c..2b8c30213 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -13,8 +13,19 @@ import ( // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/495 +// +openshift:file-pattern=cvoRunLevel=0000_00,operatorName=cluster-version-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=clusterversions,scope=Cluster // +kubebuilder:validation:XValidation:rule="has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) && self.spec.capabilities.baselineCapabilitySet == 'None' && 'baremetal' in self.spec.capabilities.additionalEnabledCapabilities ? 'MachineAPI' in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) && 'MachineAPI' in self.status.capabilities.enabledCapabilities) : true",message="the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability" // +kubebuilder:validation:XValidation:rule="has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) && self.spec.capabilities.baselineCapabilitySet == 'None' && 'marketplace' in self.spec.capabilities.additionalEnabledCapabilities ? 
'OperatorLifecycleManager' in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) && 'OperatorLifecycleManager' in self.status.capabilities.enabledCapabilities) : true",message="the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability" +// +kubebuilder:printcolumn:name=Version,JSONPath=.status.history[?(@.state=="Completed")].version,type=string +// +kubebuilder:printcolumn:name=Available,JSONPath=.status.conditions[?(@.type=="Available")].status,type=string +// +kubebuilder:printcolumn:name=Progressing,JSONPath=.status.conditions[?(@.type=="Progressing")].status,type=string +// +kubebuilder:printcolumn:name=Since,JSONPath=.status.conditions[?(@.type=="Progressing")].lastTransitionTime,type=date +// +kubebuilder:printcolumn:name=Status,JSONPath=.status.conditions[?(@.type=="Progressing")].message,type=string +// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true type ClusterVersion struct { metav1.TypeMeta `json:",inline"` @@ -100,7 +111,7 @@ type ClusterVersionSpec struct { // // A maximum of 32 signature stores may be configured. // +kubebuilder:validation:MaxItems=32 - // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +openshift:enable:FeatureGate=SignatureStores // +listType=map // +listMapKey=url // +optional diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go index 928181849..36b1696af 100644 --- a/vendor/github.com/openshift/api/config/v1/types_console.go +++ b/vendor/github.com/openshift/api/config/v1/types_console.go @@ -14,6 +14,11 @@ import ( // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=consoles,scope=Cluster +// +kubebuilder:subresource:status type Console struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go index 5f8697673..1875c9cdd 100644 --- a/vendor/github.com/openshift/api/config/v1/types_dns.go +++ b/vendor/github.com/openshift/api/config/v1/types_dns.go @@ -10,6 +10,11 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
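The two ClusterVersion capability rules above are easier to read restated in plain Go: with baselineCapabilitySet 'None', requesting baremetal (or marketplace) in additionalEnabledCapabilities is valid only if MachineAPI (or OperatorLifecycleManager) is either requested alongside it or already implicitly enabled in status.capabilities.enabledCapabilities. An illustrative translation, not code from this patch:

package example

import "slices"

// capabilityAllowed mirrors the CEL dependency check: once `requested` is
// present in specCaps, its dependency must appear in either specCaps or the
// cluster's status enabledCapabilities.
func capabilityAllowed(requested, dependency string, specCaps, statusCaps []string) bool {
	if !slices.Contains(specCaps, requested) {
		return true // the rule only fires when the capability is requested
	}
	return slices.Contains(specCaps, dependency) || slices.Contains(statusCaps, dependency)
}

capabilityAllowed("baremetal", "MachineAPI", spec, status) reproduces the first rule, and the deleted TechPreview test cases above exercise exactly these paths.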
// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=dnses,scope=Cluster +// +kubebuilder:subresource:status type DNS struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index f608fd0be..2efe16f4e 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -1,8 +1,6 @@ package v1 import ( - "fmt" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -14,6 +12,11 @@ import ( // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=featuregates,scope=Cluster +// +kubebuilder:subresource:status type FeatureGate struct { metav1.TypeMeta `json:",inline"` @@ -47,6 +50,9 @@ var ( // TopologyManager enables ToplogyManager support. Upgrades are enabled with this feature. LatencySensitive FeatureSet = "LatencySensitive" + + // AllFixedFeatureSets are the featuresets that have known featuregates. Custom doesn't for instance. LatencySensitive is dead + AllFixedFeatureSets = []FeatureSet{Default, TechPreviewNoUpgrade} ) type FeatureGateSpec struct { @@ -59,6 +65,8 @@ type FeatureGateSelection struct { // Turning on or off features may cause irreversible changes in your cluster which cannot be undone. // +unionDiscriminator // +optional + // +kubebuilder:validation:XValidation:rule="oldSelf == 'CustomNoUpgrade' ? self == 'CustomNoUpgrade' : true",message="CustomNoUpgrade may not be changed" + // +kubebuilder:validation:XValidation:rule="oldSelf == 'TechPreviewNoUpgrade' ? self == 'TechPreviewNoUpgrade' : true",message="TechPreviewNoUpgrade may not be changed" FeatureSet FeatureSet `json:"featureSet,omitempty"` // customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. @@ -143,171 +151,3 @@ type FeatureGateEnabledDisabled struct { Enabled []FeatureGateDescription Disabled []FeatureGateDescription } - -// FeatureSets Contains a map of Feature names to Enabled/Disabled Feature. -// -// NOTE: The caller needs to make sure to check for the existence of the value -// using golang's existence field. A possible scenario is an upgrade where new -// FeatureSets are added and a controller has not been upgraded with a newer -// version of this file. In this upgrade scenario the map could return nil. -// -// example: -// -// if featureSet, ok := FeatureSets["SomeNewFeature"]; ok { } -// -// If you put an item in either of these lists, put your area and name on it so we can find owners. -var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ - Default: defaultFeatures, - CustomNoUpgrade: { - Enabled: []FeatureGateDescription{}, - Disabled: []FeatureGateDescription{ - disableKubeletCloudCredentialProviders, // We do not currently ship the correct config to use the external credentials provider. 
- }, - }, - TechPreviewNoUpgrade: newDefaultFeatures(). - with(validatingAdmissionPolicy). - with(csiDriverSharedResource). - with(nodeSwap). - with(machineAPIProviderOpenStack). - with(insightsConfigAPI). - with(dynamicResourceAllocation). - with(gateGatewayAPI). - with(maxUnavailableStatefulSet). - without(eventedPleg). - with(sigstoreImageVerification). - with(gcpLabelsTags). - with(gcpClusterHostedDNS). - with(vSphereStaticIPs). - with(routeExternalCertificate). - with(automatedEtcdBackup). - without(machineAPIOperatorDisableMachineHealthCheckController). - with(adminNetworkPolicy). - with(dnsNameResolver). - with(machineConfigNodes). - with(metricsServer). - with(installAlternateInfrastructureAWS). - without(clusterAPIInstall). - with(mixedCPUsAllocation). - with(managedBootImages). - without(disableKubeletCloudCredentialProviders). - with(onClusterBuild). - with(signatureStores). - with(pinnedImages). - with(upgradeStatus). - with(translateStreamCloseWebsocketRequests). - with(volumeGroupSnapshot). - toFeatures(defaultFeatures), - LatencySensitive: newDefaultFeatures(). - toFeatures(defaultFeatures), -} - -var defaultFeatures = &FeatureGateEnabledDisabled{ - Enabled: []FeatureGateDescription{ - openShiftPodSecurityAdmission, - alibabaPlatform, // This is a bug, it should be TechPreviewNoUpgrade. This must be downgraded before 4.14 is shipped. - azureWorkloadIdentity, - cloudDualStackNodeIPs, - externalCloudProvider, - externalCloudProviderAzure, - externalCloudProviderGCP, - externalCloudProviderExternal, - privateHostedZoneAWS, - buildCSIVolumes, - kmsv1, - vSphereControlPlaneMachineset, - sdnLiveMigration, - }, - Disabled: []FeatureGateDescription{ - disableKubeletCloudCredentialProviders, // We do not currently ship the correct config to use the external credentials provider. 
- }, -} - -type featureSetBuilder struct { - forceOn []FeatureGateDescription - forceOff []FeatureGateDescription -} - -func newDefaultFeatures() *featureSetBuilder { - return &featureSetBuilder{} -} - -func (f *featureSetBuilder) with(forceOn FeatureGateDescription) *featureSetBuilder { - for _, curr := range f.forceOn { - if curr.FeatureGateAttributes.Name == forceOn.FeatureGateAttributes.Name { - panic(fmt.Errorf("coding error: %q enabled twice", forceOn.FeatureGateAttributes.Name)) - } - } - f.forceOn = append(f.forceOn, forceOn) - return f -} - -func (f *featureSetBuilder) without(forceOff FeatureGateDescription) *featureSetBuilder { - for _, curr := range f.forceOff { - if curr.FeatureGateAttributes.Name == forceOff.FeatureGateAttributes.Name { - panic(fmt.Errorf("coding error: %q disabled twice", forceOff.FeatureGateAttributes.Name)) - } - } - f.forceOff = append(f.forceOff, forceOff) - return f -} - -func (f *featureSetBuilder) isForcedOff(needle FeatureGateDescription) bool { - for _, forcedOff := range f.forceOff { - if needle.FeatureGateAttributes.Name == forcedOff.FeatureGateAttributes.Name { - return true - } - } - return false -} - -func (f *featureSetBuilder) isForcedOn(needle FeatureGateDescription) bool { - for _, forceOn := range f.forceOn { - if needle.FeatureGateAttributes.Name == forceOn.FeatureGateAttributes.Name { - return true - } - } - return false -} - -func (f *featureSetBuilder) toFeatures(defaultFeatures *FeatureGateEnabledDisabled) *FeatureGateEnabledDisabled { - finalOn := []FeatureGateDescription{} - finalOff := []FeatureGateDescription{} - - // only add the default enabled features if they haven't been explicitly set off - for _, defaultOn := range defaultFeatures.Enabled { - if !f.isForcedOff(defaultOn) { - finalOn = append(finalOn, defaultOn) - } - } - for _, currOn := range f.forceOn { - if f.isForcedOff(currOn) { - panic("coding error, you can't have features both on and off") - } - found := false - for _, alreadyOn := range finalOn { - if alreadyOn.FeatureGateAttributes.Name == currOn.FeatureGateAttributes.Name { - found = true - } - } - if found { - continue - } - - finalOn = append(finalOn, currOn) - } - - // only add the default disabled features if they haven't been explicitly set on - for _, defaultOff := range defaultFeatures.Disabled { - if !f.isForcedOn(defaultOff) { - finalOff = append(finalOff, defaultOff) - } - } - for _, currOff := range f.forceOff { - finalOff = append(finalOff, currOff) - } - - return &FeatureGateEnabledDisabled{ - Enabled: finalOn, - Disabled: finalOff, - } -} diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go index 928224c0d..74511f864 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image.go +++ b/vendor/github.com/openshift/api/config/v1/types_image.go @@ -15,6 +15,11 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
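Two details of the types_feature.go hunk above are worth noting. The new transition rules on featureSet use oldSelf, which CEL binds only on updates: entering CustomNoUpgrade or TechPreviewNoUpgrade from the default is allowed, but once either is set it can never be changed, matching the "CANNOT BE UNDONE" warning in the field's documentation. And the compiled-in FeatureSets map plus its featureSetBuilder are deleted outright, so callers doing lookups such as FeatureSets["SomeNewFeature"] must move to whatever replaces them; the destination is not part of this hunk. A plain-Go restatement of the transition rules, for illustration only:

package example

// featureSetChangeAllowed mirrors the two oldSelf XValidation rules added to
// FeatureGateSelection: leaving CustomNoUpgrade or TechPreviewNoUpgrade is
// rejected, every other transition is permitted.
func featureSetChangeAllowed(oldSet, newSet string) bool {
	if oldSet == "CustomNoUpgrade" || oldSet == "TechPreviewNoUpgrade" {
		return newSet == oldSet
	}
	return true
}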
// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=images,scope=Cluster +// +kubebuilder:subresource:status type Image struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go index 3dc315f68..f2faf1996 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go @@ -11,6 +11,11 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/874 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=imagecontentpolicies,scope=Cluster +// +kubebuilder:subresource:status type ImageContentPolicy struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go index 987c6cfdc..8fa38f223 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go @@ -11,6 +11,11 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1126 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=imagedigestmirrorsets,scope=Cluster,shortName=idms +// +kubebuilder:subresource:status type ImageDigestMirrorSet struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go index 295522e59..d9627b78c 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go @@ -11,6 +11,11 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
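The resource markers being added file by file here also carry shortName entries (idms above, itms in the hunk that follows), which surface in the generated CRD and let the objects be addressed as, for example, kubectl get idms. A sketch of the standard marker block these hunks introduce, with illustrative names:

package example

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// +kubebuilder:object:root=true
// +kubebuilder:resource:path=examples,scope=Cluster,shortName=ex
// +kubebuilder:subresource:status
type Example struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
}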
// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1126 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=imagetagmirrorsets,scope=Cluster,shortName=itms +// +kubebuilder:subresource:status type ImageTagMirrorSet struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 4ff2e68f1..611ba928c 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -13,6 +13,11 @@ import ( // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=infrastructures,scope=Cluster +// +kubebuilder:subresource:status type Infrastructure struct { metav1.TypeMeta `json:",inline"` @@ -603,8 +608,8 @@ const ( type GCPPlatformSpec struct{} // GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider. -// +openshift:validation:FeatureSetAwareXValidation:featureSet=CustomNoUpgrade;TechPreviewNoUpgrade,rule="!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)",message="resourceLabels may only be configured during installation" -// +openshift:validation:FeatureSetAwareXValidation:featureSet=CustomNoUpgrade;TechPreviewNoUpgrade,rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=GCPLabelsTags,rule="!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)",message="resourceLabels may only be configured during installation" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=GCPLabelsTags,rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation" type GCPPlatformStatus struct { // resourceGroupName is the Project ID for new GCP resources created for the cluster. ProjectID string `json:"projectID"` @@ -621,7 +626,7 @@ type GCPPlatformStatus struct { // +listType=map // +listMapKey=key // +optional - // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +openshift:enable:FeatureGate=GCPLabelsTags ResourceLabels []GCPResourceLabel `json:"resourceLabels,omitempty"` // resourceTags is a list of additional tags to apply to GCP resources created for the cluster. @@ -632,7 +637,7 @@ type GCPPlatformStatus struct { // +listType=map // +listMapKey=key // +optional - // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +openshift:enable:FeatureGate=GCPLabelsTags ResourceTags []GCPResourceTag `json:"resourceTags,omitempty"` // This field was introduced and removed under tech preview. 
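The FeatureGateAwareXValidation rules above are transition rules: oldSelf is only bound on updates, so creation may set the field freely, after which its presence can never change. A plain-Go restatement of the shared predicate (the function name is ours, for illustration):

func presenceUnchanged(oldHas, newHas bool) bool {
	// !has(oldSelf.x) && !has(self.x) || has(oldSelf.x) && has(self.x)
	return (!oldHas && !newHas) || (oldHas && newHas)
}

Evaluated against resourceLabels and resourceTags, this is what yields the "may only be configured during installation" behavior without a separate admission plugin.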
@@ -649,7 +654,7 @@ type GCPPlatformStatus struct { // // +default={"dnsType": "PlatformDefault"} // +kubebuilder:default={"dnsType": "PlatformDefault"} - // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +openshift:enable:FeatureGate=GCPClusterHostedDNS // +optional // +nullable CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` @@ -898,7 +903,7 @@ type BareMetalPlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. // +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *BareMetalPlatformLoadBalancer `json:"loadBalancer,omitempty"` @@ -1105,7 +1110,7 @@ type OvirtPlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. // +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *OvirtPlatformLoadBalancer `json:"loadBalancer,omitempty"` } @@ -1235,6 +1240,7 @@ type VSpherePlatformTopology struct { // VSpherePlatformFailureDomainSpec. // For example, for zone=zonea, region=region1, and infrastructure name=test, // the template path would be calculated as //vm/test-rhcos-region1-zonea. + // +openshift:enable:FeatureGate=VSphereControlPlaneMachineSet // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=2048 // +kubebuilder:validation:Pattern=`^/.*?/vm/.*?` @@ -1436,7 +1442,7 @@ type VSpherePlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. // +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *VSpherePlatformLoadBalancer `json:"loadBalancer,omitempty"` @@ -1829,7 +1835,7 @@ type NutanixPlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. // +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *NutanixPlatformLoadBalancer `json:"loadBalancer,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go index e518f6765..e58ad7f00 100644 --- a/vendor/github.com/openshift/api/config/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go @@ -13,6 +13,11 @@ import ( // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
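The +kubebuilder:default={"type": "OpenShiftManagedDefault"} markers repeated across these platform statuses put the default into the CRD's structural schema, so the API server fills in an omitted loadBalancer at write time. A defensive consumer sketch, assuming the vendored configv1 types (the helper itself is hypothetical):

import configv1 "github.com/openshift/api/config/v1"

// Objects persisted before the field existed, or with the gate disabled, can
// still carry a nil loadBalancer, so don't rely on the schema default alone.
func loadBalancerType(lb *configv1.BareMetalPlatformLoadBalancer) string {
	if lb == nil {
		return "OpenShiftManagedDefault" // mirrors the schema default above
	}
	return string(lb.Type)
}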
// +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=ingresses,scope=Cluster +// +kubebuilder:subresource:status type Ingress struct { metav1.TypeMeta `json:",inline"` @@ -164,20 +169,20 @@ const ( // +kubebuilder:validation:MaxLength=512 type ConsumingUser string -// Hostname is an alias for hostname string validation. -// -// The left operand of the | is the original kubebuilder hostname validation format, which is incorrect because it -// allows upper case letters, disallows hyphen or number in the TLD, and allows labels to start/end in non-alphanumeric -// characters. See https://bugzilla.redhat.com/show_bug.cgi?id=2039256. -// ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$ -// -// The right operand of the | is a new pattern that mimics the current API route admission validation on hostname, -// except that it allows hostnames longer than the maximum length: -// ^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ -// -// Both operand patterns are made available so that modifications on ingress spec can still happen after an invalid hostname -// was saved via validation by the incorrect left operand of the | operator. -// +// Hostname is a host name as defined by RFC-1123. +// + --- +// + The left operand of the | is the original kubebuilder hostname validation format, which is incorrect because it +// + allows upper case letters, disallows hyphen or number in the TLD, and allows labels to start/end in non-alphanumeric +// + characters. See https://bugzilla.redhat.com/show_bug.cgi?id=2039256. +// + ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$ +// + +// + The right operand of the | is a new pattern that mimics the current API route admission validation on hostname, +// + except that it allows hostnames longer than the maximum length: +// + ^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ +// + +// + Both operand patterns are made available so that modifications on ingress spec can still happen after an invalid hostname +// + was saved via validation by the incorrect left operand of the | operator. 
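The retained two-operand union is easiest to see side by side. A runnable sketch, with both operands copied from the Pattern marker that follows, showing why the permissive legacy operand is kept:

package main

import (
	"fmt"
	"regexp"
)

var (
	// Original kubebuilder operand: permissive (upper case, unicode classes).
	legacy = regexp.MustCompile(`^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$`)
	// Route-admission-style operand: lower-case RFC-1123 labels only.
	strict = regexp.MustCompile(`^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$`)
)

func main() {
	for _, h := range []string{"api.Example.COM", "api.example.com"} {
		// A previously persisted mixed-case value matches only the legacy
		// operand; the union keeps later updates to such objects from failing.
		fmt.Printf("%-16s legacy=%v strict=%v\n", h, legacy.MatchString(h), strict.MatchString(h))
	}
}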
+// + // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$` type Hostname string diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go index 794f3db7b..618aeff3b 100644 --- a/vendor/github.com/openshift/api/config/v1/types_network.go +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -1,6 +1,9 @@ package v1 -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) // +genclient // +genclient:nonNamespaced @@ -10,7 +13,11 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Please view network.spec for an explanation on what applies when configuring this resource. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 // +openshift:compatibility-gen:level=1 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=networks,scope=Cluster type Network struct { metav1.TypeMeta `json:",inline"` @@ -34,6 +41,7 @@ type Network struct { // As a general rule, this SHOULD NOT be read directly. Instead, you should // consume the NetworkStatus, as it indicates the currently deployed configuration. // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. +// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkDiagnosticsConfig,rule="!has(self.networkDiagnostics) || !has(self.networkDiagnostics.mode) || self.networkDiagnostics.mode!='Disabled' || !has(self.networkDiagnostics.sourcePlacement) && !has(self.networkDiagnostics.targetPlacement)",message="cannot set networkDiagnostics.sourcePlacement and networkDiagnostics.targetPlacement when networkDiagnostics.mode is Disabled" type NetworkSpec struct { // IP address pool to use for pod IPs. // This field is immutable after installation. @@ -66,6 +74,17 @@ type NetworkSpec struct { // installed. // +kubebuilder:validation:Pattern=`^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$` ServiceNodePortRange string `json:"serviceNodePortRange,omitempty"` + + // networkDiagnostics defines network diagnostics configuration. + // + // Takes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io. + // If networkDiagnostics is not specified or is empty, + // and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true, + // the network diagnostics feature will be disabled. + // + // +optional + // +openshift:enable:FeatureGate=NetworkDiagnosticsConfig + NetworkDiagnostics NetworkDiagnostics `json:"networkDiagnostics"` } // NetworkStatus is the current network configuration. @@ -88,13 +107,15 @@ type NetworkStatus struct { // conditions represents the observations of a network.config current state. 
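The new FeatureGateAwareXValidation on NetworkSpec couples the placement fields to the mode. A plain-Go approximation of that CEL rule, using the NetworkDiagnostics types introduced just below (validateNetworkDiagnostics is our name, and CEL's has() presence checks are approximated here with zero-value checks):

import (
	"errors"

	configv1 "github.com/openshift/api/config/v1"
)

func validateNetworkDiagnostics(spec configv1.NetworkSpec) error {
	d := spec.NetworkDiagnostics
	if d.Mode != configv1.NetworkDiagnosticsDisabled {
		return nil // placements are only rejected while diagnostics are Disabled
	}
	if d.SourcePlacement.NodeSelector != nil || len(d.SourcePlacement.Tolerations) > 0 ||
		d.TargetPlacement.NodeSelector != nil || len(d.TargetPlacement.Tolerations) > 0 {
		return errors.New("cannot set networkDiagnostics placements when mode is Disabled")
	}
	return nil
}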
 	// Known .status.conditions.type are: "NetworkTypeMigrationInProgress", "NetworkTypeMigrationMTUReady",
-	// "NetworkTypeMigrationTargetCNIAvailable", "NetworkTypeMigrationTargetCNIInUse"
-	// and "NetworkTypeMigrationOriginalCNIPurged"
+	// "NetworkTypeMigrationTargetCNIAvailable", "NetworkTypeMigrationTargetCNIInUse",
+	// "NetworkTypeMigrationOriginalCNIPurged" and "NetworkDiagnosticsAvailable"
 	// +optional
 	// +patchMergeKey=type
 	// +patchStrategy=merge
 	// +listType=map
 	// +listMapKey=type
+	// +openshift:enable:FeatureGate=NetworkLiveMigration
+	// +openshift:enable:FeatureGate=NetworkDiagnosticsConfig
 	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
 }
 
@@ -192,3 +213,89 @@ type MTUMigrationValues struct {
 	// +optional
 	From *uint32 `json:"from,omitempty"`
 }
+
+// NetworkDiagnosticsMode is an enumeration of the available network diagnostics modes.
+// Valid values are "", "All", "Disabled".
+// +kubebuilder:validation:Enum:="";All;Disabled
+type NetworkDiagnosticsMode string
+
+const (
+	// NetworkDiagnosticsNoOpinion means that the user has no opinion and the platform is left
+	// to choose a reasonable default. The current default is All and is subject to change over time.
+	NetworkDiagnosticsNoOpinion NetworkDiagnosticsMode = ""
+	// NetworkDiagnosticsAll means that all network diagnostics checks are enabled
+	NetworkDiagnosticsAll NetworkDiagnosticsMode = "All"
+	// NetworkDiagnosticsDisabled means that network diagnostics is disabled
+	NetworkDiagnosticsDisabled NetworkDiagnosticsMode = "Disabled"
+)
+
+// NetworkDiagnostics defines network diagnostics configuration
+type NetworkDiagnostics struct {
+	// mode controls the network diagnostics mode
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is All.
+	//
+	// +optional
+	Mode NetworkDiagnosticsMode `json:"mode"`
+
+	// sourcePlacement controls the scheduling of network diagnostics source deployment
+	//
+	// See NetworkDiagnosticsSourcePlacement for more details about default values.
+	//
+	// +optional
+	SourcePlacement NetworkDiagnosticsSourcePlacement `json:"sourcePlacement"`
+
+	// targetPlacement controls the scheduling of network diagnostics target daemonset
+	//
+	// See NetworkDiagnosticsTargetPlacement for more details about default values.
+	//
+	// +optional
+	TargetPlacement NetworkDiagnosticsTargetPlacement `json:"targetPlacement"`
+}
+
+// NetworkDiagnosticsSourcePlacement defines node scheduling configuration for network diagnostics source components
+type NetworkDiagnosticsSourcePlacement struct {
+	// nodeSelector is the node selector applied to network diagnostics components
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is `kubernetes.io/os: linux`.
+	//
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector"`
+
+	// tolerations is a list of tolerations applied to network diagnostics components
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is an empty list.
+	//
+	// +optional
+	// +listType=atomic
+	Tolerations []corev1.Toleration `json:"tolerations"`
+}
+
+// NetworkDiagnosticsTargetPlacement defines node scheduling configuration for network diagnostics target components
+type NetworkDiagnosticsTargetPlacement struct {
+	// nodeSelector is the node selector applied to network diagnostics components
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is `kubernetes.io/os: linux`.
+	//
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector"`
+
+	// tolerations is a list of tolerations applied to network diagnostics components
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default is `- operator: "Exists"` which means that all taints are tolerated.
+	//
+	// +optional
+	// +listType=atomic
+	Tolerations []corev1.Toleration `json:"tolerations"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_node.go b/vendor/github.com/openshift/api/config/v1/types_node.go
index 233c89d9c..3dd31f39a 100644
--- a/vendor/github.com/openshift/api/config/v1/types_node.go
+++ b/vendor/github.com/openshift/api/config/v1/types_node.go
@@ -14,6 +14,9 @@ import (
 //
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
 // +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1107
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
 // +kubebuilder:resource:path=nodes,scope=Cluster
 // +kubebuilder:subresource:status
 type Node struct {
diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go
index 451a5ec38..6654479dc 100644
--- a/vendor/github.com/openshift/api/config/v1/types_oauth.go
+++ b/vendor/github.com/openshift/api/config/v1/types_oauth.go
@@ -14,6 +14,11 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 //
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
 // +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=oauths,scope=Cluster
+// +kubebuilder:subresource:status
 type OAuth struct {
diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
index ba2c96343..1fddfa51e 100644
--- a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
+++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
@@ -38,9 +38,14 @@ type OperatorHubStatus struct {
 // enabled to disabled and vice versa.
 //
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
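Putting the NetworkDiagnostics placement types above together, a hypothetical spec fragment (selector and toleration values are illustrative) that pins the source deployment to infra nodes while the target daemonset keeps the documented tolerate-everything default:

import (
	corev1 "k8s.io/api/core/v1"

	configv1 "github.com/openshift/api/config/v1"
)

var diagnostics = configv1.NetworkDiagnostics{
	Mode: configv1.NetworkDiagnosticsAll,
	SourcePlacement: configv1.NetworkDiagnosticsSourcePlacement{
		NodeSelector: map[string]string{"node-role.kubernetes.io/infra": ""},
		Tolerations: []corev1.Toleration{
			{Key: "node-role.kubernetes.io/infra", Operator: corev1.TolerationOpExists},
		},
	},
	TargetPlacement: configv1.NetworkDiagnosticsTargetPlacement{
		// Spelled out here for clarity; this matches the documented default.
		Tolerations: []corev1.Toleration{{Operator: corev1.TolerationOpExists}},
	},
}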
+// +kubebuilder:object:root=true +// +kubebuilder:resource:path=operatorhubs,scope=Cluster // +kubebuilder:subresource:status // +genclient // +genclient:nonNamespaced +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=marketplace,operatorOrdering=01 +// +openshift:capability=marketplace // +openshift:compatibility-gen:level=1 type OperatorHub struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go index 85afb90c2..8d6d614b6 100644 --- a/vendor/github.com/openshift/api/config/v1/types_project.go +++ b/vendor/github.com/openshift/api/config/v1/types_project.go @@ -10,6 +10,11 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=projects,scope=Cluster +// +kubebuilder:subresource:status type Project struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go index 40ed296d6..851291bb0 100644 --- a/vendor/github.com/openshift/api/config/v1/types_proxy.go +++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go @@ -12,6 +12,11 @@ import ( // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=proxies,scope=Cluster +// +kubebuilder:subresource:status type Proxy struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go index 07c4d2e42..061c4a883 100644 --- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go +++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go @@ -11,6 +11,11 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=schedulers,scope=Cluster +// +kubebuilder:subresource:status type Scheduler struct { metav1.TypeMeta `json:",inline"` @@ -43,7 +48,7 @@ type SchedulerSpec struct { // +optional Profile SchedulerProfile `json:"profile,omitempty"` // profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles. 
- // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +openshift:enable:FeatureGate=DynamicResourceAllocation // +optional ProfileCustomizations ProfileCustomizations `json:"profileCustomizations"` // defaultNodeSelector helps set the cluster-wide default node selector to diff --git a/vendor/github.com/openshift/api/config/v1/types_testreporting.go b/vendor/github.com/openshift/api/config/v1/types_testreporting.go new file mode 100644 index 000000000..4d642e060 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_testreporting.go @@ -0,0 +1,46 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// TestReporting is used for origin (and potentially others) to report the test names for a given FeatureGate into +// the payload for later analysis on a per-payload basis. +// This doesn't need any CRD because it's never stored in the cluster. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:internal +type TestReporting struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + // +required + Spec TestReportingSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status TestReportingStatus `json:"status"` +} + +type TestReportingSpec struct { + // TestsForFeatureGates is a list, indexed by FeatureGate and includes information about testing. + TestsForFeatureGates []FeatureGateTests `json:"testsForFeatureGates"` +} + +type FeatureGateTests struct { + // FeatureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance. + FeatureGate string `json:"featureGate"` + + // Tests contains an item for every TestName + Tests []TestDetails `json:"tests"` +} + +type TestDetails struct { + // TestName is the name of the test as it appears in junit XMLs. + // It does not include the suite name since the same test can be executed in many suites. + TestName string `json:"testName"` +} + +type TestReportingStatus struct { +} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index ff9409905..c80e66c43 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -2209,6 +2209,27 @@ func (in *FeatureGateStatus) DeepCopy() *FeatureGateStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateTests) DeepCopyInto(out *FeatureGateTests) { + *out = *in + if in.Tests != nil { + in, out := &in.Tests, &out.Tests + *out = make([]TestDetails, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateTests. +func (in *FeatureGateTests) DeepCopy() *FeatureGateTests { + if in == nil { + return nil + } + out := new(FeatureGateTests) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
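Since TestReporting is never served as a CRD, it is populated in-process and written into payload artifacts. A sketch of how origin-style tooling might fill it in, assuming the vendored configv1 package (the gate and test names are invented for illustration):

var report = configv1.TestReporting{
	Spec: configv1.TestReportingSpec{
		TestsForFeatureGates: []configv1.FeatureGateTests{{
			FeatureGate: "NetworkDiagnosticsConfig",
			Tests: []configv1.TestDetails{
				{TestName: "[sig-network] diagnostics should report NetworkDiagnosticsAvailable"},
			},
		}},
	},
}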
func (in *GCPPlatformSpec) DeepCopyInto(out *GCPPlatformSpec) { *out = *in @@ -3597,6 +3618,84 @@ func (in *Network) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDiagnostics) DeepCopyInto(out *NetworkDiagnostics) { + *out = *in + in.SourcePlacement.DeepCopyInto(&out.SourcePlacement) + in.TargetPlacement.DeepCopyInto(&out.TargetPlacement) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnostics. +func (in *NetworkDiagnostics) DeepCopy() *NetworkDiagnostics { + if in == nil { + return nil + } + out := new(NetworkDiagnostics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDiagnosticsSourcePlacement) DeepCopyInto(out *NetworkDiagnosticsSourcePlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnosticsSourcePlacement. +func (in *NetworkDiagnosticsSourcePlacement) DeepCopy() *NetworkDiagnosticsSourcePlacement { + if in == nil { + return nil + } + out := new(NetworkDiagnosticsSourcePlacement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDiagnosticsTargetPlacement) DeepCopyInto(out *NetworkDiagnosticsTargetPlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnosticsTargetPlacement. +func (in *NetworkDiagnosticsTargetPlacement) DeepCopy() *NetworkDiagnosticsTargetPlacement { + if in == nil { + return nil + } + out := new(NetworkDiagnosticsTargetPlacement) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NetworkList) DeepCopyInto(out *NetworkList) { *out = *in @@ -3669,6 +3768,7 @@ func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { *out = new(ExternalIPConfig) (*in).DeepCopyInto(*out) } + in.NetworkDiagnostics.DeepCopyInto(&out.NetworkDiagnostics) return } @@ -5494,6 +5594,81 @@ func (in *TemplateReference) DeepCopy() *TemplateReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestDetails) DeepCopyInto(out *TestDetails) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestDetails. 
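The generated helpers above copy maps and slices element by element because a plain struct assignment only copies headers. A small sketch of the aliasing they prevent (exampleAliasing is ours; assumes the vendored configv1 import):

func exampleAliasing() {
	original := configv1.NetworkDiagnosticsSourcePlacement{
		NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
	}
	shallow := original          // struct copy; the NodeSelector map is shared
	deep := *original.DeepCopy() // fresh map and slice allocations
	deep.NodeSelector["zone"] = "a"    // invisible to original
	shallow.NodeSelector["zone"] = "b" // mutates original's map as well
}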
+func (in *TestDetails) DeepCopy() *TestDetails { + if in == nil { + return nil + } + out := new(TestDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestReporting) DeepCopyInto(out *TestReporting) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReporting. +func (in *TestReporting) DeepCopy() *TestReporting { + if in == nil { + return nil + } + out := new(TestReporting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestReportingSpec) DeepCopyInto(out *TestReportingSpec) { + *out = *in + if in.TestsForFeatureGates != nil { + in, out := &in.TestsForFeatureGates, &out.TestsForFeatureGates + *out = make([]FeatureGateTests, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReportingSpec. +func (in *TestReportingSpec) DeepCopy() *TestReportingSpec { + if in == nil { + return nil + } + out := new(TestReportingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestReportingStatus) DeepCopyInto(out *TestReportingStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReportingStatus. +func (in *TestReportingStatus) DeepCopy() *TestReportingStatus { + if in == nil { + return nil + } + out := new(TestReportingStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TokenClaimMapping) DeepCopyInto(out *TokenClaimMapping) { *out = *in diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 000000000..286bbbd84 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,490 @@ +apiservers.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: apiservers.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: APIServer + Labels: {} + PluralName: apiservers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +authentications.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: authentications.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ExternalOIDC + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Authentication + Labels: {} + PluralName: authentications + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +builds.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: builds.config.openshift.io + Capability: Build + Category: "" + FeatureGates: [] + FilenameOperatorName: openshift-controller-manager + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Build + Labels: {} + PluralName: builds + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +clusteroperators.config.openshift.io: + Annotations: + include.release.openshift.io/self-managed-high-availability: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/497 + CRDName: clusteroperators.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: cluster-version-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_00" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterOperator + Labels: {} + PluralName: clusteroperators + PrinterColumns: + - description: The version the operator is at. + jsonPath: .status.versions[?(@.name=="operator")].version + name: Version + type: string + - description: Whether the operator is running and stable. + jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - description: Whether the operator is processing changes. + jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - description: Whether the operator is degraded. + jsonPath: .status.conditions[?(@.type=="Degraded")].status + name: Degraded + type: string + - description: The time the operator's Available status last changed. 
+ jsonPath: .status.conditions[?(@.type=="Available")].lastTransitionTime + name: Since + type: date + Scope: Cluster + ShortNames: + - co + TopLevelFeatureGates: [] + Version: v1 + +clusterversions.config.openshift.io: + Annotations: + include.release.openshift.io/self-managed-high-availability: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/495 + CRDName: clusterversions.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - SignatureStores + FilenameOperatorName: cluster-version-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_00" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterVersion + Labels: {} + PluralName: clusterversions + PrinterColumns: + - jsonPath: .status.history[?(@.state=="Completed")].version + name: Version + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime + name: Since + type: date + - jsonPath: .status.conditions[?(@.type=="Progressing")].message + name: Status + type: string + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +consoles.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: consoles.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Console + Labels: {} + PluralName: consoles + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +dnses.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: dnses.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: DNS + Labels: {} + PluralName: dnses + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +featuregates.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: featuregates.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: FeatureGate + Labels: {} + PluralName: featuregates + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +images.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: images.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Image + Labels: {} + PluralName: images + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +imagecontentpolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/874 + CRDName: imagecontentpolicies.config.openshift.io + Capability: "" + Category: 
"" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImageContentPolicy + Labels: {} + PluralName: imagecontentpolicies + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +imagedigestmirrorsets.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1126 + CRDName: imagedigestmirrorsets.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImageDigestMirrorSet + Labels: {} + PluralName: imagedigestmirrorsets + PrinterColumns: [] + Scope: Cluster + ShortNames: + - idms + TopLevelFeatureGates: [] + Version: v1 + +imagetagmirrorsets.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1126 + CRDName: imagetagmirrorsets.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImageTagMirrorSet + Labels: {} + PluralName: imagetagmirrorsets + PrinterColumns: [] + Scope: Cluster + ShortNames: + - itms + TopLevelFeatureGates: [] + Version: v1 + +infrastructures.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: infrastructures.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - BareMetalLoadBalancer + - GCPClusterHostedDNS + - GCPLabelsTags + - VSphereControlPlaneMachineSet + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Infrastructure + Labels: {} + PluralName: infrastructures + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +ingresses.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: ingresses.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Ingress + Labels: {} + PluralName: ingresses + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +networks.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: networks.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - NetworkDiagnosticsConfig + - NetworkLiveMigration + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: false + KindName: Network + Labels: {} + PluralName: networks + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +nodes.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1107 + CRDName: nodes.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + 
KindName: Node + Labels: {} + PluralName: nodes + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +oauths.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: oauths.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: OAuth + Labels: {} + PluralName: oauths + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +operatorhubs.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: operatorhubs.config.openshift.io + Capability: marketplace + Category: "" + FeatureGates: [] + FilenameOperatorName: marketplace + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_03" + GroupName: config.openshift.io + HasStatus: true + KindName: OperatorHub + Labels: {} + PluralName: operatorhubs + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +projects.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: projects.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Project + Labels: {} + PluralName: projects + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +proxies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: proxies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_03" + GroupName: config.openshift.io + HasStatus: true + KindName: Proxy + Labels: {} + PluralName: proxies + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + +schedulers.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: schedulers.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - DynamicResourceAllocation + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Scheduler + Labels: {} + PluralName: schedulers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index f751368b3..fcb4fb9a4 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -1987,6 +1987,36 @@ func (Network) SwaggerDoc() map[string]string { return map_Network } +var map_NetworkDiagnostics = map[string]string{ + "mode": "mode controls the network diagnostics mode\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. 
The current default is All.",
+	"sourcePlacement": "sourcePlacement controls the scheduling of network diagnostics source deployment\n\nSee NetworkDiagnosticsSourcePlacement for more details about default values.",
+	"targetPlacement": "targetPlacement controls the scheduling of network diagnostics target daemonset\n\nSee NetworkDiagnosticsTargetPlacement for more details about default values.",
+}
+
+func (NetworkDiagnostics) SwaggerDoc() map[string]string {
+	return map_NetworkDiagnostics
+}
+
+var map_NetworkDiagnosticsSourcePlacement = map[string]string{
+	"":             "NetworkDiagnosticsSourcePlacement defines node scheduling configuration for network diagnostics source components",
+	"nodeSelector": "nodeSelector is the node selector applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `kubernetes.io/os: linux`.",
+	"tolerations":  "tolerations is a list of tolerations applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is an empty list.",
+}
+
+func (NetworkDiagnosticsSourcePlacement) SwaggerDoc() map[string]string {
+	return map_NetworkDiagnosticsSourcePlacement
+}
+
+var map_NetworkDiagnosticsTargetPlacement = map[string]string{
+	"":             "NetworkDiagnosticsTargetPlacement defines node scheduling configuration for network diagnostics target components",
+	"nodeSelector": "nodeSelector is the node selector applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `kubernetes.io/os: linux`.",
+	"tolerations":  "tolerations is a list of tolerations applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `- operator: \"Exists\"` which means that all taints are tolerated.",
+}
+
+func (NetworkDiagnosticsTargetPlacement) SwaggerDoc() map[string]string {
+	return map_NetworkDiagnosticsTargetPlacement
+}
+
 var map_NetworkList = map[string]string{
 	"":         "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
 	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
@@ -2013,6 +2043,7 @@ var map_NetworkSpec = map[string]string{
 	"networkType": "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN This field is immutable after installation.",
 	"externalIP":  "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.",
 	"serviceNodePortRange": "The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range.
This parameter can be updated after the cluster is installed.", + "networkDiagnostics": "networkDiagnostics defines network diagnostics configuration.\n\nTakes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io. If networkDiagnostics is not specified or is empty, and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true, the network diagnostics feature will be disabled.", } func (NetworkSpec) SwaggerDoc() map[string]string { @@ -2026,7 +2057,7 @@ var map_NetworkStatus = map[string]string{ "networkType": "NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).", "clusterNetworkMTU": "ClusterNetworkMTU is the MTU for inter-pod networking.", "migration": "Migration contains the cluster network migration configuration.", - "conditions": "conditions represents the observations of a network.config current state. Known .status.conditions.type are: \"NetworkTypeMigrationInProgress\", \"NetworkTypeMigrationMTUReady\", \"NetworkTypeMigrationTargetCNIAvailable\", \"NetworkTypeMigrationTargetCNIInUse\" and \"NetworkTypeMigrationOriginalCNIPurged\"", + "conditions": "conditions represents the observations of a network.config current state. Known .status.conditions.type are: \"NetworkTypeMigrationInProgress\", \"NetworkTypeMigrationMTUReady\", \"NetworkTypeMigrationTargetCNIAvailable\", \"NetworkTypeMigrationTargetCNIInUse\", \"NetworkTypeMigrationOriginalCNIPurged\" and \"NetworkDiagnosticsAvailable\"", } func (NetworkStatus) SwaggerDoc() map[string]string { @@ -2476,6 +2507,41 @@ func (SchedulerSpec) SwaggerDoc() map[string]string { return map_SchedulerSpec } +var map_FeatureGateTests = map[string]string{ + "featureGate": "FeatureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance.", + "tests": "Tests contains an item for every TestName", +} + +func (FeatureGateTests) SwaggerDoc() map[string]string { + return map_FeatureGateTests +} + +var map_TestDetails = map[string]string{ + "testName": "TestName is the name of the test as it appears in junit XMLs. It does not include the suite name since the same test can be executed in many suites.", +} + +func (TestDetails) SwaggerDoc() map[string]string { + return map_TestDetails +} + +var map_TestReporting = map[string]string{ + "": "TestReporting is used for origin (and potentially others) to report the test names for a given FeatureGate into the payload for later analysis on a per-payload basis. This doesn't need any CRD because it's never stored in the cluster.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (TestReporting) SwaggerDoc() map[string]string { + return map_TestReporting +} + +var map_TestReportingSpec = map[string]string{ + "testsForFeatureGates": "TestsForFeatureGates is a list, indexed by FeatureGate and includes information about testing.", +} + +func (TestReportingSpec) SwaggerDoc() map[string]string { + return map_TestReportingSpec +} + var map_CustomTLSProfile = map[string]string{ "": "CustomTLSProfile is a user-defined TLS security profile. 
Be extremely careful using a custom TLS profile as invalid configurations can be catastrophic.", } diff --git a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_backup-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_backup-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index 697eb7332..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_backup-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,142 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1482 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: backups.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: "Backup provides configuration for performing backups of the - openshift cluster. \n Compatibility level 4: No compatibility is provided, - the API can change at any point for any reason. These capabilities should - not be used by applications needing long term support." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - etcd: - description: etcd specifies the configuration for periodic backups - of the etcd cluster - properties: - pvcName: - description: PVCName specifies the name of the PersistentVolumeClaim - (PVC) which binds a PersistentVolume where the etcd backup files - would be saved The PVC itself must always be created in the - "openshift-etcd" namespace If the PVC is left unspecified "" - then the platform will choose a reasonable default location - to save the backup. In the future this would be backups saved - across the control-plane master nodes. - type: string - retentionPolicy: - description: RetentionPolicy defines the retention policy for - retaining and deleting existing backups. - properties: - retentionNumber: - description: RetentionNumber configures the retention policy - based on the number of backups - properties: - maxNumberOfBackups: - description: MaxNumberOfBackups defines the maximum number - of backups to retain. If the existing number of backups - saved is equal to MaxNumberOfBackups then the oldest - backup will be removed before a new backup is initiated. 
- minimum: 1 - type: integer - required: - - maxNumberOfBackups - type: object - retentionSize: - description: RetentionSize configures the retention policy - based on the size of backups - properties: - maxSizeOfBackupsGb: - description: MaxSizeOfBackupsGb defines the total size - in GB of backups to retain. If the current total size - backups exceeds MaxSizeOfBackupsGb then the oldest backup - will be removed before a new backup is initiated. - minimum: 1 - type: integer - required: - - maxSizeOfBackupsGb - type: object - retentionType: - allOf: - - enum: - - RetentionNumber - - RetentionSize - - enum: - - "" - - RetentionNumber - - RetentionSize - description: RetentionType sets the type of retention policy. - Currently, the only valid policies are retention by number - of backups (RetentionNumber), by the size of backups (RetentionSize). - More policies or types may be added in the future. Empty - string means no opinion and the platform is left to choose - a reasonable default which is subject to change without - notice. The current default is RetentionNumber with 15 backups - kept. - type: string - required: - - retentionType - type: object - schedule: - description: 'Schedule defines the recurring backup schedule in - Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 - * * * Empty string means no opinion and the platform is left - to choose a reasonable default which is subject to change without - notice. The current default is "no backups", but will change - in the future.' - pattern: ^(@(annually|yearly|monthly|weekly|daily|hourly))|(\*|(?:\*|(?:[0-9]|(?:[1-5][0-9])))\/(?:[0-9]|(?:[1-5][0-9]))|(?:[0-9]|(?:[1-5][0-9]))(?:(?:\-[0-9]|\-(?:[1-5][0-9]))?|(?:\,(?:[0-9]|(?:[1-5][0-9])))*)) - (\*|(?:\*|(?:\*|(?:[0-9]|1[0-9]|2[0-3])))\/(?:[0-9]|1[0-9]|2[0-3])|(?:[0-9]|1[0-9]|2[0-3])(?:(?:\-(?:[0-9]|1[0-9]|2[0-3]))?|(?:\,(?:[0-9]|1[0-9]|2[0-3]))*)) - (\*|(?:[1-9]|(?:[12][0-9])|3[01])(?:(?:\-(?:[1-9]|(?:[12][0-9])|3[01]))?|(?:\,(?:[1-9]|(?:[12][0-9])|3[01]))*)) - (\*|(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)(?:(?:\-(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))?|(?:\,(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))*)) - (\*|(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT)(?:(?:\-(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))?|(?:\,(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))*))$ - type: string - timeZone: - description: The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. - If not specified, this will default to the time zone of the - kube-controller-manager process. See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones - pattern: ^([A-Za-z_]+([+-]*0)*|[A-Za-z_]+(\/[A-Za-z_]+){1,2})(\/GMT[+-]\d{1,2})?$ - type: string - type: object - required: - - etcd - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. 
- type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_clusterimagepolicy-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_clusterimagepolicy-CustomNoUpgrade.crd.yaml deleted file mode 100644 index 69dbe3a2f..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_clusterimagepolicy-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,398 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1457 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: clusterimagepolicies.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ClusterImagePolicy - listKind: ClusterImagePolicyList - plural: clusterimagepolicies - singular: clusterimagepolicy - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: "ClusterImagePolicy holds cluster-wide configuration for image - signature verification \n Compatibility level 4: No compatibility is provided, - the API can change at any point for any reason. These capabilities should - not be used by applications needing long term support." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec contains the configuration for the cluster image policy. - properties: - policy: - description: policy contains configuration to allow scopes to be verified, - and defines how images not matching the verification policy will - be treated. - properties: - rootOfTrust: - description: rootOfTrust specifies the root of trust for the policy. - properties: - fulcioCAWithRekor: - description: 'fulcioCAWithRekor defines the root of trust - based on the Fulcio certificate and the Rekor public key. - For more information about Fulcio and Rekor, please refer - to the document at: https://github.com/sigstore/fulcio and - https://github.com/sigstore/rekor' - properties: - fulcioCAData: - description: fulcioCAData contains inline base64-encoded - data for the PEM format fulcio CA. fulcioCAData must - be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - fulcioSubject: - description: fulcioSubject specifies OIDC issuer and the - email of the Fulcio authentication configuration. - properties: - oidcIssuer: - description: 'oidcIssuer contains the expected OIDC - issuer. It will be verified that the Fulcio-issued - certificate contains a (Fulcio-defined) certificate - extension pointing at this OIDC issuer URL. 
When - Fulcio issues certificates, it includes a value - based on an URL inside the client-provided ID token. - Example: "https://expected.OIDC.issuer/"' - type: string - x-kubernetes-validations: - - message: oidcIssuer must be a valid URL - rule: isURL(self) - signedEmail: - description: 'signedEmail holds the email address - the the Fulcio certificate is issued for. Example: - "expected-signing-user@example.com"' - type: string - x-kubernetes-validations: - - message: invalid email address - rule: self.matches('^\\S+@\\S+$') - required: - - oidcIssuer - - signedEmail - type: object - rekorKeyData: - description: rekorKeyData contains inline base64-encoded - data for the PEM format from the Rekor public key. rekorKeyData - must be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - required: - - fulcioCAData - - fulcioSubject - - rekorKeyData - type: object - policyType: - description: policyType serves as the union's discriminator. - Users are required to assign a value to this field, choosing - one of the policy types that define the root of trust. "PublicKey" - indicates that the policy relies on a sigstore publicKey - and may optionally use a Rekor verification. "FulcioCAWithRekor" - indicates that the policy is based on the Fulcio certification - and incorporates a Rekor verification. - enum: - - PublicKey - - FulcioCAWithRekor - type: string - publicKey: - description: publicKey defines the root of trust based on - a sigstore public key. - properties: - keyData: - description: keyData contains inline base64-encoded data - for the PEM format public key. KeyData must be at most - 8192 characters. - format: byte - maxLength: 8192 - type: string - rekorKeyData: - description: rekorKeyData contains inline base64-encoded - data for the PEM format from the Rekor public key. rekorKeyData - must be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - required: - - keyData - type: object - required: - - policyType - type: object - x-kubernetes-validations: - - message: publicKey is required when policyType is PublicKey, - and forbidden otherwise - rule: 'has(self.policyType) && self.policyType == ''PublicKey'' - ? has(self.publicKey) : !has(self.publicKey)' - - message: fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, - and forbidden otherwise - rule: 'has(self.policyType) && self.policyType == ''FulcioCAWithRekor'' - ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)' - signedIdentity: - description: signedIdentity specifies what image identity the - signature claims about the image. The required matchPolicy field - specifies the approach used in the verification process to verify - the identity in the signature and the actual image identity, - the default matchPolicy is "MatchRepoDigestOrExact". - properties: - exactRepository: - description: exactRepository is required if matchPolicy is - set to "ExactRepository". - properties: - repository: - description: repository is the reference of the image - identity to be matched. The value should be a repository - name (by omitting the tag or digest) in a registry implementing - the "Docker Registry HTTP API V2". For example, docker.io/library/busybox - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? 
- self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - required: - - repository - type: object - matchPolicy: - description: matchPolicy sets the type of matching to be used. - Valid values are "MatchRepoDigestOrExact", "MatchRepository", - "ExactRepository", "RemapIdentity". When omitted, the default - value is "MatchRepoDigestOrExact". If set matchPolicy to - ExactRepository, then the exactRepository must be specified. - If set matchPolicy to RemapIdentity, then the remapIdentity - must be specified. "MatchRepoDigestOrExact" means that the - identity in the signature must be in the same repository - as the image identity if the image identity is referenced - by a digest. Otherwise, the identity in the signature must - be the same as the image identity. "MatchRepository" means - that the identity in the signature must be in the same repository - as the image identity. "ExactRepository" means that the - identity in the signature must be in the same repository - as a specific identity specified by "repository". "RemapIdentity" - means that the signature must be in the same as the remapped - image identity. Remapped image identity is obtained by replacing - the "prefix" with the specified “signedPrefix” if the the - image identity matches the specified remapPrefix. - enum: - MatchRepoDigestOrExact - MatchRepository - ExactRepository - RemapIdentity - type: string - remapIdentity: - description: remapIdentity is required if matchPolicy is set - to "RemapIdentity". - properties: - prefix: - description: prefix is the prefix of the image identity - to be matched. If the image identity matches the specified - prefix, that prefix is replaced by the specified “signedPrefix” - (otherwise it is used as unchanged and no remapping - takes place). This useful when verifying signatures - for a mirror of some other repository namespace that - preserves the vendor’s repository structure. The prefix - and signedPrefix values can be either host[:port] values - (matching exactly the same host[:port], string), repository - namespaces, or repositories (i.e.
they must not contain - tags/digests), and match as prefixes of the fully expanded - form. For example, docker.io/library/busybox (not busybox) - to specify that single repository, or docker.io/library - (not an empty string) to specify the parent namespace - of docker.io/library/busybox. - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - required: - - prefix - - signedPrefix - type: object - required: - - matchPolicy - type: object - x-kubernetes-validations: - - message: exactRepository is required when matchPolicy is ExactRepository, - and forbidden otherwise - rule: '(has(self.matchPolicy) && self.matchPolicy == ''ExactRepository'') - ? has(self.exactRepository) : !has(self.exactRepository)' - - message: remapIdentity is required when matchPolicy is RemapIdentity, - and forbidden otherwise - rule: '(has(self.matchPolicy) && self.matchPolicy == ''RemapIdentity'') - ? has(self.remapIdentity) : !has(self.remapIdentity)' - required: - - rootOfTrust - type: object - scopes: - description: 'scopes defines the list of image identities assigned - to a policy. Each item refers to a scope in a registry implementing - the "Docker Registry HTTP API V2". Scopes matching individual images - are named Docker references in the fully expanded form, either using - a tag or digest. For example, docker.io/library/busybox:latest (not - busybox:latest). More general scopes are prefixes of individual-image - scopes, and specify a repository (by omitting the tag or digest), - a repository namespace, or a registry host (by only specifying the - host name and possibly a port number) or a wildcard expression starting - with `*.`, for matching all subdomains (not including a port number). - Wildcards are only supported for subdomain matching, and may not - be used in the middle of the host, i.e. *.example.com is a valid - case, but example*.*.com is not. Please be aware that the scopes - should not be nested under the repositories of OpenShift Container - Platform images. If configured, the policies for OpenShift Container - Platform repositories will not be in effect. For additional details - about the format, please refer to the document explaining the docker - transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker' - items: - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid image scope format, scope must contain a fully - qualified domain name or 'localhost' - rule: 'size(self.split(''/'')[0].split(''.'')) == 1 ? self.split(''/'')[0].split(''.'')[0].split('':'')[0] - == ''localhost'' : true' - - message: invalid image scope with wildcard, a wildcard can only - be at the start of the domain and is only supported for subdomain - matching, not path matching - rule: 'self.contains(''*'') ? self.matches(''^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$'') - : true' - - message: invalid repository namespace or image specification in - the image scope - rule: '!self.contains(''*'') ? 
self.matches(''^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$'') - : true' - maxItems: 256 - type: array - x-kubernetes-list-type: set - required: - - policy - - scopes - type: object - status: - description: status contains the observed state of the resource. - properties: - conditions: - description: conditions provide details on the status of this API - Resource. - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_clusterimagepolicy-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_clusterimagepolicy-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index 834c03ae1..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_clusterimagepolicy-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,398 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1457 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: clusterimagepolicies.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ClusterImagePolicy - listKind: ClusterImagePolicyList - plural: clusterimagepolicies - singular: clusterimagepolicy - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: "ClusterImagePolicy holds cluster-wide configuration for image - signature verification \n Compatibility level 4: No compatibility is provided, - the API can change at any point for any reason. These capabilities should - not be used by applications needing long term support." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec contains the configuration for the cluster image policy. - properties: - policy: - description: policy contains configuration to allow scopes to be verified, - and defines how images not matching the verification policy will - be treated. - properties: - rootOfTrust: - description: rootOfTrust specifies the root of trust for the policy. - properties: - fulcioCAWithRekor: - description: 'fulcioCAWithRekor defines the root of trust - based on the Fulcio certificate and the Rekor public key. - For more information about Fulcio and Rekor, please refer - to the document at: https://github.com/sigstore/fulcio and - https://github.com/sigstore/rekor' - properties: - fulcioCAData: - description: fulcioCAData contains inline base64-encoded - data for the PEM format fulcio CA. fulcioCAData must - be at most 8192 characters. 
- format: byte - maxLength: 8192 - type: string - fulcioSubject: - description: fulcioSubject specifies OIDC issuer and the - email of the Fulcio authentication configuration. - properties: - oidcIssuer: - description: 'oidcIssuer contains the expected OIDC - issuer. It will be verified that the Fulcio-issued - certificate contains a (Fulcio-defined) certificate - extension pointing at this OIDC issuer URL. When - Fulcio issues certificates, it includes a value - based on an URL inside the client-provided ID token. - Example: "https://expected.OIDC.issuer/"' - type: string - x-kubernetes-validations: - - message: oidcIssuer must be a valid URL - rule: isURL(self) - signedEmail: - description: 'signedEmail holds the email address - the the Fulcio certificate is issued for. Example: - "expected-signing-user@example.com"' - type: string - x-kubernetes-validations: - - message: invalid email address - rule: self.matches('^\\S+@\\S+$') - required: - - oidcIssuer - - signedEmail - type: object - rekorKeyData: - description: rekorKeyData contains inline base64-encoded - data for the PEM format from the Rekor public key. rekorKeyData - must be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - required: - - fulcioCAData - - fulcioSubject - - rekorKeyData - type: object - policyType: - description: policyType serves as the union's discriminator. - Users are required to assign a value to this field, choosing - one of the policy types that define the root of trust. "PublicKey" - indicates that the policy relies on a sigstore publicKey - and may optionally use a Rekor verification. "FulcioCAWithRekor" - indicates that the policy is based on the Fulcio certification - and incorporates a Rekor verification. - enum: - - PublicKey - - FulcioCAWithRekor - type: string - publicKey: - description: publicKey defines the root of trust based on - a sigstore public key. - properties: - keyData: - description: keyData contains inline base64-encoded data - for the PEM format public key. KeyData must be at most - 8192 characters. - format: byte - maxLength: 8192 - type: string - rekorKeyData: - description: rekorKeyData contains inline base64-encoded - data for the PEM format from the Rekor public key. rekorKeyData - must be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - required: - - keyData - type: object - required: - - policyType - type: object - x-kubernetes-validations: - - message: publicKey is required when policyType is PublicKey, - and forbidden otherwise - rule: 'has(self.policyType) && self.policyType == ''PublicKey'' - ? has(self.publicKey) : !has(self.publicKey)' - - message: fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, - and forbidden otherwise - rule: 'has(self.policyType) && self.policyType == ''FulcioCAWithRekor'' - ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)' - signedIdentity: - description: signedIdentity specifies what image identity the - signature claims about the image. The required matchPolicy field - specifies the approach used in the verification process to verify - the identity in the signature and the actual image identity, - the default matchPolicy is "MatchRepoDigestOrExact". - properties: - exactRepository: - description: exactRepository is required if matchPolicy is - set to "ExactRepository". - properties: - repository: - description: repository is the reference of the image - identity to be matched. 
The value should be a repository - name (by omitting the tag or digest) in a registry implementing - the "Docker Registry HTTP API V2". For example, docker.io/library/busybox - maxLength: 512 - type: string - x-kubernetes-validations: - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - required: - - repository - type: object - matchPolicy: - description: matchPolicy sets the type of matching to be used. - Valid values are "MatchRepoDigestOrExact", "MatchRepository", - "ExactRepository", "RemapIdentity". When omitted, the default - value is "MatchRepoDigestOrExact". If set matchPolicy to - ExactRepository, then the exactRepository must be specified. - If set matchPolicy to RemapIdentity, then the remapIdentity - must be specified. "MatchRepoDigestOrExact" means that the - identity in the signature must be in the same repository - as the image identity if the image identity is referenced - by a digest. Otherwise, the identity in the signature must - be the same as the image identity. "MatchRepository" means - that the identity in the signature must be in the same repository - as the image identity. "ExactRepository" means that the - identity in the signature must be in the same repository - as a specific identity specified by "repository". "RemapIdentity" - means that the signature must be in the same as the remapped - image identity. Remapped image identity is obtained by replacing - the "prefix" with the specified “signedPrefix” if the the - image identity matches the specified remapPrefix. - enum: - MatchRepoDigestOrExact - MatchRepository - ExactRepository - RemapIdentity - type: string - remapIdentity: - description: remapIdentity is required if matchPolicy is set - to "RemapIdentity". - properties: - prefix: - description: prefix is the prefix of the image identity - to be matched. If the image identity matches the specified - prefix, that prefix is replaced by the specified “signedPrefix” - (otherwise it is used as unchanged and no remapping - takes place). This useful when verifying signatures - for a mirror of some other repository namespace that - preserves the vendor’s repository structure. The prefix - and signedPrefix values can be either host[:port] values - (matching exactly the same host[:port], string), repository - namespaces, or repositories (i.e. they must not contain - tags/digests), and match as prefixes of the fully expanded - form. For example, docker.io/library/busybox (not busybox) - to specify that single repository, or docker.io/library - (not an empty string) to specify the parent namespace - of docker.io/library/busybox. - maxLength: 512 - type: string - x-kubernetes-validations: - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')?
- self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - signedPrefix: - description: signedPrefix is the prefix of the image identity - to be matched in the signature. The format is the same - as "prefix". The values can be either host[:port] values - (matching exactly the same host[:port], string), repository - namespaces, or repositories (i.e. they must not contain - tags/digests), and match as prefixes of the fully expanded - form. For example, docker.io/library/busybox (not busybox) - to specify that single repository, or docker.io/library - (not an empty string) to specify the parent namespace - of docker.io/library/busybox. - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - required: - - prefix - - signedPrefix - type: object - required: - - matchPolicy - type: object - x-kubernetes-validations: - - message: exactRepository is required when matchPolicy is ExactRepository, - and forbidden otherwise - rule: '(has(self.matchPolicy) && self.matchPolicy == ''ExactRepository'') - ? has(self.exactRepository) : !has(self.exactRepository)' - - message: remapIdentity is required when matchPolicy is RemapIdentity, - and forbidden otherwise - rule: '(has(self.matchPolicy) && self.matchPolicy == ''RemapIdentity'') - ? has(self.remapIdentity) : !has(self.remapIdentity)' - required: - - rootOfTrust - type: object - scopes: - description: 'scopes defines the list of image identities assigned - to a policy. Each item refers to a scope in a registry implementing - the "Docker Registry HTTP API V2". Scopes matching individual images - are named Docker references in the fully expanded form, either using - a tag or digest. For example, docker.io/library/busybox:latest (not - busybox:latest). More general scopes are prefixes of individual-image - scopes, and specify a repository (by omitting the tag or digest), - a repository namespace, or a registry host (by only specifying the - host name and possibly a port number) or a wildcard expression starting - with `*.`, for matching all subdomains (not including a port number). - Wildcards are only supported for subdomain matching, and may not - be used in the middle of the host, i.e. *.example.com is a valid - case, but example*.*.com is not. Please be aware that the scopes - should not be nested under the repositories of OpenShift Container - Platform images. If configured, the policies for OpenShift Container - Platform repositories will not be in effect. 
For additional details - about the format, please refer to the document explaining the docker - transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker' - items: - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid image scope format, scope must contain a fully - qualified domain name or 'localhost' - rule: 'size(self.split(''/'')[0].split(''.'')) == 1 ? self.split(''/'')[0].split(''.'')[0].split('':'')[0] - == ''localhost'' : true' - - message: invalid image scope with wildcard, a wildcard can only - be at the start of the domain and is only supported for subdomain - matching, not path matching - rule: 'self.contains(''*'') ? self.matches(''^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$'') - : true' - - message: invalid repository namespace or image specification in - the image scope - rule: '!self.contains(''*'') ? self.matches(''^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$'') - : true' - maxItems: 256 - type: array - x-kubernetes-list-type: set - required: - - policy - - scopes - type: object - status: - description: status contains the observed state of the resource. - properties: - conditions: - description: conditions provide details on the status of this API - Resource. - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. 
- maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_imagepolicy-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_imagepolicy-CustomNoUpgrade.crd.yaml deleted file mode 100644 index 642a19f78..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_imagepolicy-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,398 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1457 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: imagepolicies.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ImagePolicy - listKind: ImagePolicyList - plural: imagepolicies - singular: imagepolicy - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: "ImagePolicy holds namespace-wide configuration for image signature - verification \n Compatibility level 4: No compatibility is provided, the - API can change at any point for any reason. These capabilities should not - be used by applications needing long term support." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - policy: - description: policy contains configuration to allow scopes to be verified, - and defines how images not matching the verification policy will - be treated. - properties: - rootOfTrust: - description: rootOfTrust specifies the root of trust for the policy. 
- properties: - fulcioCAWithRekor: - description: 'fulcioCAWithRekor defines the root of trust - based on the Fulcio certificate and the Rekor public key. - For more information about Fulcio and Rekor, please refer - to the document at: https://github.com/sigstore/fulcio and - https://github.com/sigstore/rekor' - properties: - fulcioCAData: - description: fulcioCAData contains inline base64-encoded - data for the PEM format fulcio CA. fulcioCAData must - be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - fulcioSubject: - description: fulcioSubject specifies OIDC issuer and the - email of the Fulcio authentication configuration. - properties: - oidcIssuer: - description: 'oidcIssuer contains the expected OIDC - issuer. It will be verified that the Fulcio-issued - certificate contains a (Fulcio-defined) certificate - extension pointing at this OIDC issuer URL. When - Fulcio issues certificates, it includes a value - based on an URL inside the client-provided ID token. - Example: "https://expected.OIDC.issuer/"' - type: string - x-kubernetes-validations: - - message: oidcIssuer must be a valid URL - rule: isURL(self) - signedEmail: - description: 'signedEmail holds the email address - the the Fulcio certificate is issued for. Example: - "expected-signing-user@example.com"' - type: string - x-kubernetes-validations: - - message: invalid email address - rule: self.matches('^\\S+@\\S+$') - required: - - oidcIssuer - - signedEmail - type: object - rekorKeyData: - description: rekorKeyData contains inline base64-encoded - data for the PEM format from the Rekor public key. rekorKeyData - must be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - required: - - fulcioCAData - - fulcioSubject - - rekorKeyData - type: object - policyType: - description: policyType serves as the union's discriminator. - Users are required to assign a value to this field, choosing - one of the policy types that define the root of trust. "PublicKey" - indicates that the policy relies on a sigstore publicKey - and may optionally use a Rekor verification. "FulcioCAWithRekor" - indicates that the policy is based on the Fulcio certification - and incorporates a Rekor verification. - enum: - - PublicKey - - FulcioCAWithRekor - type: string - publicKey: - description: publicKey defines the root of trust based on - a sigstore public key. - properties: - keyData: - description: keyData contains inline base64-encoded data - for the PEM format public key. KeyData must be at most - 8192 characters. - format: byte - maxLength: 8192 - type: string - rekorKeyData: - description: rekorKeyData contains inline base64-encoded - data for the PEM format from the Rekor public key. rekorKeyData - must be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - required: - - keyData - type: object - required: - - policyType - type: object - x-kubernetes-validations: - - message: publicKey is required when policyType is PublicKey, - and forbidden otherwise - rule: 'has(self.policyType) && self.policyType == ''PublicKey'' - ? has(self.publicKey) : !has(self.publicKey)' - - message: fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, - and forbidden otherwise - rule: 'has(self.policyType) && self.policyType == ''FulcioCAWithRekor'' - ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)' - signedIdentity: - description: signedIdentity specifies what image identity the - signature claims about the image. 
The required matchPolicy field - specifies the approach used in the verification process to verify - the identity in the signature and the actual image identity, - the default matchPolicy is "MatchRepoDigestOrExact". - properties: - exactRepository: - description: exactRepository is required if matchPolicy is - set to "ExactRepository". - properties: - repository: - description: repository is the reference of the image - identity to be matched. The value should be a repository - name (by omitting the tag or digest) in a registry implementing - the "Docker Registry HTTP API V2". For example, docker.io/library/busybox - maxLength: 512 - type: string - x-kubernetes-validations: - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - required: - - repository - type: object - matchPolicy: - description: matchPolicy sets the type of matching to be used. - Valid values are "MatchRepoDigestOrExact", "MatchRepository", - "ExactRepository", "RemapIdentity". When omitted, the default - value is "MatchRepoDigestOrExact". If set matchPolicy to - ExactRepository, then the exactRepository must be specified. - If set matchPolicy to RemapIdentity, then the remapIdentity - must be specified. "MatchRepoDigestOrExact" means that the - identity in the signature must be in the same repository - as the image identity if the image identity is referenced - by a digest. Otherwise, the identity in the signature must - be the same as the image identity. "MatchRepository" means - that the identity in the signature must be in the same repository - as the image identity. "ExactRepository" means that the - identity in the signature must be in the same repository - as a specific identity specified by "repository". "RemapIdentity" - means that the signature must be in the same as the remapped - image identity. Remapped image identity is obtained by replacing - the "prefix" with the specified “signedPrefix” if the the - image identity matches the specified remapPrefix. - enum: - MatchRepoDigestOrExact - MatchRepository - ExactRepository - RemapIdentity - type: string - remapIdentity: - description: remapIdentity is required if matchPolicy is set - to "RemapIdentity". - properties: - prefix: - description: prefix is the prefix of the image identity - to be matched. If the image identity matches the specified - prefix, that prefix is replaced by the specified “signedPrefix” - (otherwise it is used as unchanged and no remapping - takes place). This useful when verifying signatures - for a mirror of some other repository namespace that - preserves the vendor’s repository structure. The prefix - and signedPrefix values can be either host[:port] values - (matching exactly the same host[:port], string), repository - namespaces, or repositories (i.e. they must not contain - tags/digests), and match as prefixes of the fully expanded - form. For example, docker.io/library/busybox (not busybox) - to specify that single repository, or docker.io/library - (not an empty string) to specify the parent namespace - of docker.io/library/busybox.
- maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - signedPrefix: - description: signedPrefix is the prefix of the image identity - to be matched in the signature. The format is the same - as "prefix". The values can be either host[:port] values - (matching exactly the same host[:port], string), repository - namespaces, or repositories (i.e. they must not contain - tags/digests), and match as prefixes of the fully expanded - form. For example, docker.io/library/busybox (not busybox) - to specify that single repository, or docker.io/library - (not an empty string) to specify the parent namespace - of docker.io/library/busybox. - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - required: - - prefix - - signedPrefix - type: object - required: - - matchPolicy - type: object - x-kubernetes-validations: - - message: exactRepository is required when matchPolicy is ExactRepository, - and forbidden otherwise - rule: '(has(self.matchPolicy) && self.matchPolicy == ''ExactRepository'') - ? has(self.exactRepository) : !has(self.exactRepository)' - - message: remapIdentity is required when matchPolicy is RemapIdentity, - and forbidden otherwise - rule: '(has(self.matchPolicy) && self.matchPolicy == ''RemapIdentity'') - ? has(self.remapIdentity) : !has(self.remapIdentity)' - required: - - rootOfTrust - type: object - scopes: - description: 'scopes defines the list of image identities assigned - to a policy. Each item refers to a scope in a registry implementing - the "Docker Registry HTTP API V2". Scopes matching individual images - are named Docker references in the fully expanded form, either using - a tag or digest. For example, docker.io/library/busybox:latest (not - busybox:latest). More general scopes are prefixes of individual-image - scopes, and specify a repository (by omitting the tag or digest), - a repository namespace, or a registry host (by only specifying the - host name and possibly a port number) or a wildcard expression starting - with `*.`, for matching all subdomains (not including a port number). - Wildcards are only supported for subdomain matching, and may not - be used in the middle of the host, i.e. *.example.com is a valid - case, but example*.*.com is not. Please be aware that the scopes - should not be nested under the repositories of OpenShift Container - Platform images. If configured, the policies for OpenShift Container - Platform repositories will not be in effect. 
For additional details - about the format, please refer to the document explaining the docker - transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker' - items: - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid image scope format, scope must contain a fully - qualified domain name or 'localhost' - rule: 'size(self.split(''/'')[0].split(''.'')) == 1 ? self.split(''/'')[0].split(''.'')[0].split('':'')[0] - == ''localhost'' : true' - - message: invalid image scope with wildcard, a wildcard can only - be at the start of the domain and is only supported for subdomain - matching, not path matching - rule: 'self.contains(''*'') ? self.matches(''^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$'') - : true' - - message: invalid repository namespace or image specification in - the image scope - rule: '!self.contains(''*'') ? self.matches(''^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$'') - : true' - maxItems: 256 - type: array - x-kubernetes-list-type: set - required: - - policy - - scopes - type: object - status: - description: status contains the observed state of the resource. - properties: - conditions: - description: conditions provide details on the status of this API - Resource. - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. 
- maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_imagepolicy-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_imagepolicy-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index 2f5ea8863..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_imagepolicy-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,398 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1457 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: imagepolicies.config.openshift.io -spec: - group: config.openshift.io - names: - kind: ImagePolicy - listKind: ImagePolicyList - plural: imagepolicies - singular: imagepolicy - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: "ImagePolicy holds namespace-wide configuration for image signature - verification \n Compatibility level 4: No compatibility is provided, the - API can change at any point for any reason. These capabilities should not - be used by applications needing long term support." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - policy: - description: policy contains configuration to allow scopes to be verified, - and defines how images not matching the verification policy will - be treated. - properties: - rootOfTrust: - description: rootOfTrust specifies the root of trust for the policy. 
- properties: - fulcioCAWithRekor: - description: 'fulcioCAWithRekor defines the root of trust - based on the Fulcio certificate and the Rekor public key. - For more information about Fulcio and Rekor, please refer - to the document at: https://github.com/sigstore/fulcio and - https://github.com/sigstore/rekor' - properties: - fulcioCAData: - description: fulcioCAData contains inline base64-encoded - data for the PEM format fulcio CA. fulcioCAData must - be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - fulcioSubject: - description: fulcioSubject specifies OIDC issuer and the - email of the Fulcio authentication configuration. - properties: - oidcIssuer: - description: 'oidcIssuer contains the expected OIDC - issuer. It will be verified that the Fulcio-issued - certificate contains a (Fulcio-defined) certificate - extension pointing at this OIDC issuer URL. When - Fulcio issues certificates, it includes a value - based on an URL inside the client-provided ID token. - Example: "https://expected.OIDC.issuer/"' - type: string - x-kubernetes-validations: - - message: oidcIssuer must be a valid URL - rule: isURL(self) - signedEmail: - description: 'signedEmail holds the email address - the the Fulcio certificate is issued for. Example: - "expected-signing-user@example.com"' - type: string - x-kubernetes-validations: - - message: invalid email address - rule: self.matches('^\\S+@\\S+$') - required: - - oidcIssuer - - signedEmail - type: object - rekorKeyData: - description: rekorKeyData contains inline base64-encoded - data for the PEM format from the Rekor public key. rekorKeyData - must be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - required: - - fulcioCAData - - fulcioSubject - - rekorKeyData - type: object - policyType: - description: policyType serves as the union's discriminator. - Users are required to assign a value to this field, choosing - one of the policy types that define the root of trust. "PublicKey" - indicates that the policy relies on a sigstore publicKey - and may optionally use a Rekor verification. "FulcioCAWithRekor" - indicates that the policy is based on the Fulcio certification - and incorporates a Rekor verification. - enum: - - PublicKey - - FulcioCAWithRekor - type: string - publicKey: - description: publicKey defines the root of trust based on - a sigstore public key. - properties: - keyData: - description: keyData contains inline base64-encoded data - for the PEM format public key. KeyData must be at most - 8192 characters. - format: byte - maxLength: 8192 - type: string - rekorKeyData: - description: rekorKeyData contains inline base64-encoded - data for the PEM format from the Rekor public key. rekorKeyData - must be at most 8192 characters. - format: byte - maxLength: 8192 - type: string - required: - - keyData - type: object - required: - - policyType - type: object - x-kubernetes-validations: - - message: publicKey is required when policyType is PublicKey, - and forbidden otherwise - rule: 'has(self.policyType) && self.policyType == ''PublicKey'' - ? has(self.publicKey) : !has(self.publicKey)' - - message: fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, - and forbidden otherwise - rule: 'has(self.policyType) && self.policyType == ''FulcioCAWithRekor'' - ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)' - signedIdentity: - description: signedIdentity specifies what image identity the - signature claims about the image. 
The required matchPolicy field - specifies the approach used in the verification process to verify - the identity in the signature and the actual image identity; - the default matchPolicy is "MatchRepoDigestOrExact". - properties: - exactRepository: - description: exactRepository is required if matchPolicy is - set to "ExactRepository". - properties: - repository: - description: repository is the reference of the image - identity to be matched. The value should be a repository - name (by omitting the tag or digest) in a registry implementing - the "Docker Registry HTTP API V2". For example, docker.io/library/busybox - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - required: - - repository - type: object - matchPolicy: - description: matchPolicy sets the type of matching to be used. - Valid values are "MatchRepoDigestOrExact", "MatchRepository", - "ExactRepository", "RemapIdentity". When omitted, the default - value is "MatchRepoDigestOrExact". If matchPolicy is set to - ExactRepository, then exactRepository must be specified. - If matchPolicy is set to RemapIdentity, then remapIdentity - must be specified. "MatchRepoDigestOrExact" means that the - identity in the signature must be in the same repository - as the image identity if the image identity is referenced - by a digest. Otherwise, the identity in the signature must - be the same as the image identity. "MatchRepository" means - that the identity in the signature must be in the same repository - as the image identity. "ExactRepository" means that the - identity in the signature must be in the same repository - as a specific identity specified by "repository". "RemapIdentity" - means that the identity in the signature must be the same as the remapped - image identity. The remapped image identity is obtained by replacing - the "prefix" with the specified "signedPrefix" if the - image identity matches the specified prefix. - enum: - - MatchRepoDigestOrExact - - MatchRepository - - ExactRepository - - RemapIdentity - type: string - remapIdentity: - description: remapIdentity is required if matchPolicy is set - to "RemapIdentity". - properties: - prefix: - description: prefix is the prefix of the image identity - to be matched. If the image identity matches the specified - prefix, that prefix is replaced by the specified "signedPrefix" - (otherwise it is used unchanged and no remapping - takes place). This is useful when verifying signatures - for a mirror of some other repository namespace that - preserves the vendor's repository structure. The prefix - and signedPrefix values can be either host[:port] values - (matching exactly the same host[:port], string), repository - namespaces, or repositories (i.e. they must not contain - tags/digests), and match as prefixes of the fully expanded - form. For example, docker.io/library/busybox (not busybox) - to specify that single repository, or docker.io/library - (not an empty string) to specify the parent namespace - of docker.io/library/busybox.
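The remapping described above is easier to see as a concrete signedIdentity stanza; a sketch borrowing the prefix/signedPrefix pair from the "valid signedIdentity prefix/signedPrefix" fixtures deleted later in this patch (signedPrefix itself is specified just below):

    signedIdentity:
      matchPolicy: RemapIdentity
      remapIdentity:
        prefix: example.com      # image identities under example.com ...
        signedPrefix: mirror.com # ... are verified as if they lived under mirror.com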
- maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - signedPrefix: - description: signedPrefix is the prefix of the image identity - to be matched in the signature. The format is the same - as "prefix". The values can be either host[:port] values - (matching exactly the same host[:port], string), repository - namespaces, or repositories (i.e. they must not contain - tags/digests), and match as prefixes of the fully expanded - form. For example, docker.io/library/busybox (not busybox) - to specify that single repository, or docker.io/library - (not an empty string) to specify the parent namespace - of docker.io/library/busybox. - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid repository or prefix in the signedIdentity, - should not include the tag or digest - rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? - self.matches(''^(localhost:[0-9]+)$''): true' - - message: invalid repository or prefix in the signedIdentity - rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') - required: - - prefix - - signedPrefix - type: object - required: - - matchPolicy - type: object - x-kubernetes-validations: - - message: exactRepository is required when matchPolicy is ExactRepository, - and forbidden otherwise - rule: '(has(self.matchPolicy) && self.matchPolicy == ''ExactRepository'') - ? has(self.exactRepository) : !has(self.exactRepository)' - - message: remapIdentity is required when matchPolicy is RemapIdentity, - and forbidden otherwise - rule: '(has(self.matchPolicy) && self.matchPolicy == ''RemapIdentity'') - ? has(self.remapIdentity) : !has(self.remapIdentity)' - required: - - rootOfTrust - type: object - scopes: - description: 'scopes defines the list of image identities assigned - to a policy. Each item refers to a scope in a registry implementing - the "Docker Registry HTTP API V2". Scopes matching individual images - are named Docker references in the fully expanded form, either using - a tag or digest. For example, docker.io/library/busybox:latest (not - busybox:latest). More general scopes are prefixes of individual-image - scopes, and specify a repository (by omitting the tag or digest), - a repository namespace, or a registry host (by only specifying the - host name and possibly a port number) or a wildcard expression starting - with `*.`, for matching all subdomains (not including a port number). - Wildcards are only supported for subdomain matching, and may not - be used in the middle of the host, i.e. *.example.com is a valid - case, but example*.*.com is not. Please be aware that the scopes - should not be nested under the repositories of OpenShift Container - Platform images. If configured, the policies for OpenShift Container - Platform repositories will not be in effect. 
For additional details - about the format, please refer to the document explaining the docker - transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker' - items: - maxLength: 512 - type: string - x-kubernetes-validations: - - message: invalid image scope format, scope must contain a fully - qualified domain name or 'localhost' - rule: 'size(self.split(''/'')[0].split(''.'')) == 1 ? self.split(''/'')[0].split(''.'')[0].split('':'')[0] - == ''localhost'' : true' - - message: invalid image scope with wildcard, a wildcard can only - be at the start of the domain and is only supported for subdomain - matching, not path matching - rule: 'self.contains(''*'') ? self.matches(''^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$'') - : true' - - message: invalid repository namespace or image specification in - the image scope - rule: '!self.contains(''*'') ? self.matches(''^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$'') - : true' - maxItems: 256 - type: array - x-kubernetes-list-type: set - required: - - policy - - scopes - type: object - status: - description: status contains the observed state of the resource. - properties: - conditions: - description: conditions provide details on the status of this API - Resource. - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. 
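For reference, a status.conditions entry satisfying the constraints spelled out here might look as follows; the condition type, reason, and timestamp are hypothetical:

  status:
    conditions:
      - type: Available                 # CamelCase, or qualified foo.example.com/CamelCase
        status: "True"                  # one of "True", "False", Unknown
        reason: AsExpected              # non-empty CamelCase programmatic identifier
        message: policy applied to all configured scopes
        lastTransitionTime: "2024-03-15T00:00:00Z"
        observedGeneration: 1           # generation the condition was computed against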
- maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_insightsdatagather.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_insightsdatagather.crd.yaml deleted file mode 100644 index b23e917c9..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_insightsdatagather.crd.yaml +++ /dev/null @@ -1,88 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1245 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: insightsdatagathers.config.openshift.io -spec: - group: config.openshift.io - names: - kind: InsightsDataGather - listKind: InsightsDataGatherList - plural: insightsdatagathers - singular: insightsdatagather - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: "InsightsDataGather provides data gather configuration options - for the Insights Operator. \n Compatibility level 4: No compatibility - is provided, the API can change at any point for any reason. These capabilities - should not be used by applications needing long term support." - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - gatherConfig: - description: gatherConfig spec attribute includes all the configuration - options related to gathering of the Insights data and its uploading - to the ingress.
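Put together, the gatherConfig fields described next compose like this; a sketch reusing the gatherer IDs quoted in the disabledGatherers description below (the metadata name is an assumption):

apiVersion: config.openshift.io/v1alpha1
kind: InsightsDataGather
metadata:
  name: cluster                         # assumed singleton name; the resource is cluster-scoped
spec:
  gatherConfig:
    dataPolicy: ObfuscateNetworking     # obfuscate IP addresses and the cluster base domain
    disabledGatherers:
      - clusterconfig/machine_configs   # gatherer IDs taken from the field description below
      - workloads/workload_info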
- properties: - dataPolicy: - description: dataPolicy allows user to enable additional global - obfuscation of the IP addresses and base domain in the Insights - archive data. Valid values are "None" and "ObfuscateNetworking". - When set to None the data is not obfuscated. When set to ObfuscateNetworking - the IP addresses and the cluster domain name are obfuscated. - When omitted, this means no opinion and the platform is left - to choose a reasonable default, which is subject to change over - time. The current default is None. - enum: - - "" - - None - - ObfuscateNetworking - type: string - disabledGatherers: - description: 'disabledGatherers is a list of gatherers to be excluded - from the gathering. All the gatherers can be disabled by providing - "all" value. If all the gatherers are disabled, the Insights - operator does not gather any data. The particular gatherers - IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. - Run the following command to get the names of last active gatherers: - "oc get insightsoperators.operator.openshift.io cluster -o json - | jq ''.status.gatherStatus.gatherers[].name''" An example of - disabling gatherers looks like this: `disabledGatherers: ["clusterconfig/machine_configs", - "workloads/workload_info"]`' - items: - type: string - type: array - type: object - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/custom.clusterimagepolicy.testsuite.yaml b/vendor/github.com/openshift/api/config/v1alpha1/custom.clusterimagepolicy.testsuite.yaml deleted file mode 100644 index 232bdf037..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/custom.clusterimagepolicy.testsuite.yaml +++ /dev/null @@ -1,451 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[CustomNoUpgrade] ClusterImagePolicy" -crd: 0000_10_config-operator_01_clusterimagepolicy-CustomNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ImagePolicy with policyType PublicKey - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Should be able to create a minimal ImagePolicy with policyType FulcioCAWithRekor - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - fulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - fulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - - name: Should not allow policyType PublicKey but not set publicKey - 
initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - FulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": publicKey is required when policyType is PublicKey, and forbidden otherwise" - - name: Should not allow policyType FulcioCAData but not set fulcioCAWithRekor - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - PublicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" - - name: Should not allow policyType set but not set corresponding policy - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": publicKey is required when policyType is PublicKey, and forbidden otherwise" - - name: Should not allow policyType set FulcioCAWith but not set corresponding policy - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" - - name: Should not allow signedIdentity matchPolicy ExactRepository but not set repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - expectedError: "spec.policy.signedIdentity: Invalid value: \"object\": exactRepository is required when matchPolicy is ExactRepository, and forbidden otherwise" - - name: Should not allow signedIdentity matchPolicy RemapIdentity but not set prefixes - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - expectedError: "spec.policy.signedIdentity: Invalid value: \"object\": remapIdentity is required when matchPolicy is RemapIdentity, and forbidden otherwise" - - name: Test scope should not allow 'busybox' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - busybox - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid image scope format, scope must contain a fully qualified domain name or 'localhost'" - - name: Test scope should not allow start with subnamesapces '*.example.com/test' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - "*.example.com/test" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid image scope with wildcard, a 
wildcard can only be at the start of the domain and is only supported for subdomain matching, not path matching" - - name: Test scope should not allow invalid digest - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:12dsdf - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid repository namespace or image specification in the image scope" - - name: Test should not allow tag in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com/namespace/namespace:latest - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow tag in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: localhost:1234/namespace/namespace:latest - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow digest in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: localhost:1234/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow tag in prefix/signedPrefix - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com/namespace:latest - signedPrefix: example.com/namespace - expectedError: "[spec.policy.signedIdentity.remapIdentity.prefix: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.remapIdentity.prefix: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should allow valid ExactRepository repository - initial: | - 
apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com - - name: Test should allow valid signedIdentity prefix/signedPrefix - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com - signedPrefix: mirror.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com - signedPrefix: mirror.com - - name: Test scope should allow localhost name with port 'localhost:1234/namespace/namespace' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - localhost:1234/namespace/namespace - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - localhost:1234/namespace/namespace - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow localhost 'localhost/foo/bar' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - localhost/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - localhost/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow 'example.com/foo/bar' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow tag 'example.com/foo/bar:latest' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/foo/bar:latest - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/foo/bar:latest - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow full specification digest - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - 
example.com/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow '*.example.com' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - "*.example.com" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - "*.example.com" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== diff --git a/vendor/github.com/openshift/api/config/v1alpha1/custom.imagepolicy.testsuite.yaml b/vendor/github.com/openshift/api/config/v1alpha1/custom.imagepolicy.testsuite.yaml deleted file mode 100644 index 05b1487fa..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/custom.imagepolicy.testsuite.yaml +++ /dev/null @@ -1,451 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[CustomNoUpgrade] ImagePolicy" -crd: 0000_10_config-operator_01_imagepolicy-CustomNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ImagePolicy with policyType PublicKey - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Should be able to create a minimal ImagePolicy with policyType FulcioCAWithRekor - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - fulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - fulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - - name: Should not allow policyType PublicKey but not set publicKey - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - FulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": publicKey is required when policyType is PublicKey, and forbidden otherwise" - - name: Should not allow policyType FulcioCAData but not set fulcioCAWithRekor - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: 
FulcioCAWithRekor - PublicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" - - name: Should not allow policyType set but not set corresponding policy - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": publicKey is required when policyType is PublicKey, and forbidden otherwise" - - name: Should not allow policyType set FulcioCAWith but not set corresponding policy - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" - - name: Should not allow signedIdentity matchPolicy ExactRepository but not set repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - expectedError: "spec.policy.signedIdentity: Invalid value: \"object\": exactRepository is required when matchPolicy is ExactRepository, and forbidden otherwise" - - name: Should not allow signedIdentity matchPolicy RemapIdentity but not set prefixes - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - expectedError: "spec.policy.signedIdentity: Invalid value: \"object\": remapIdentity is required when matchPolicy is RemapIdentity, and forbidden otherwise" - - name: Test scope should not allow 'busybox' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - busybox - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid image scope format, scope must contain a fully qualified domain name or 'localhost'" - - name: Test scope should not allow start with subnamesapces '*.example.com/test' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - "*.example.com/test" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid image scope with wildcard, a wildcard can only be at the start of the domain and is only supported for subdomain matching, not path matching" - - name: Test scope should not allow invalid digest - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:12dsdf - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid repository namespace or image specification in the image scope" - - name: Test should not allow tag in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - 
publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com/namespace/namespace:latest - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow tag in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: localhost:1234/namespace/namespace:latest - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow digest in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: localhost:1234/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow tag in prefix/signedPrefix - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com/namespace:latest - signedPrefix: example.com/namespace - expectedError: "[spec.policy.signedIdentity.remapIdentity.prefix: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.remapIdentity.prefix: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should allow valid ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com - - name: Test should allow valid signedIdentity prefix/signedPrefix - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey 
- publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com - signedPrefix: mirror.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com - signedPrefix: mirror.com - - name: Test scope should allow localhost name with port 'localhost:1234/namespace/namespace' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - localhost:1234/namespace/namespace - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - localhost:1234/namespace/namespace - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow localhost 'localhost/foo/bar' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - localhost/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - localhost/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow 'example.com/foo/bar' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow tag 'example.com/foo/bar:latest' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/foo/bar:latest - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/foo/bar:latest - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow full specification digest - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow '*.example.com' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - "*.example.com" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - "*.example.com" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== diff --git 
a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.backup.testsuite.yaml b/vendor/github.com/openshift/api/config/v1alpha1/techpreview.backup.testsuite.yaml deleted file mode 100644 index 91836dd93..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.backup.testsuite.yaml +++ /dev/null @@ -1,202 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[TechPreview] Backup" -crd: 0000_10_config-operator_01_backup-TechPreviewNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a Backup with a valid spec - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "* 2 * * *" - pvcName: etcdbackup-pvc - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "* 2 * * *" - pvcName: etcdbackup-pvc - - name: Should be able to create an EtcdBackup without the pvcName specified - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "* 2 * * *" - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "* 2 * * *" - - name: Should be able to create a Backup with a valid schedule - At 22:00 on every day-of-week from Monday through Friday - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "0 22 * * 1-5" - pvcName: etcdbackup-pvc - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "0 22 * * 1-5" - pvcName: etcdbackup-pvc - - name: Should be able to create a Backup with a valid schedule - At 04:05 on Sunday. - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "5 4 * * SUN" - pvcName: etcdbackup-pvc - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "5 4 * * SUN" - pvcName: etcdbackup-pvc - - name: Should be able to create a Backup with a valid schedule - Predefined hourly - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "@hourly" - pvcName: etcdbackup-pvc - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "@hourly" - pvcName: etcdbackup-pvc - - name: Should fail to create an EtcdBackup with an invalid schedule - At 04:05 on invalid day FOO. - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "5 4 * * FOO" - pvcName: etcdbackup-pvc - expectedError: "spec.etcd.schedule in body should match" - - name: Should fail to create an EtcdBackup with an invalid schedule - Predefined typo @hourli instead of @hourly. 
- initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "@hourli" - pvcName: etcdbackup-pvc - expectedError: "spec.etcd.schedule in body should match" - - name: Should fail to create an EtcdBackup with an invalid schedule - Non standard L last Friday in month - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "* * * * 5L" - pvcName: etcdbackup-pvc - expectedError: "spec.etcd.schedule in body should match" - - name: Should fail to create an EtcdBackup with an invalid schedule - Non standard L 5th day before last day of month - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "* * L-5 * *" - pvcName: etcdbackup-pvc - expectedError: "spec.etcd.schedule in body should match" - - name: Should fail to create an EtcdBackup with an invalid schedule - Non standard W closest weekday to 15th of month - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - schedule: "* * 15W * *" - pvcName: etcdbackup-pvc - expectedError: "spec.etcd.schedule in body should match" - - name: Should be able to create a Backup with a valid time zone - Africa/Banjul - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: Africa/Banjul - pvcName: etcdbackup-pvc - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: Africa/Banjul - pvcName: etcdbackup-pvc - - name: Should be able to create a Backup with a valid time zone - Etc/GMT-8 - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: Etc/GMT-8 - pvcName: etcdbackup-pvc - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: Etc/GMT-8 - pvcName: etcdbackup-pvc - - name: Should be able to create a Backup with a valid time zone - Etc/UTC - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: Etc/UTC - pvcName: etcdbackup-pvc - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: Etc/UTC - pvcName: etcdbackup-pvc - - name: Should be able to create a Backup with a valid time zone - America/Argentina/Catamarca - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: America/Argentina/Catamarca - pvcName: etcdbackup-pvc - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: America/Argentina/Catamarca - pvcName: etcdbackup-pvc - - name: Should fail to create an EtcdBackup with an invalid time zone - GMT2 - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: GMT2 - pvcName: etcdbackup-pvc - expectedError: "spec.etcd.timeZone in body should match" - - name: Should fail to create an EtcdBackup with an invalid time zone - GMT+3 - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: Backup - spec: - etcd: - timeZone: GMT+3 - pvcName: etcdbackup-pvc - expectedError: "spec.etcd.timeZone in body should match" - diff --git a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.clusterimagepolicy.testsuite.yaml b/vendor/github.com/openshift/api/config/v1alpha1/techpreview.clusterimagepolicy.testsuite.yaml deleted file mode 100644 index 625310667..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.clusterimagepolicy.testsuite.yaml +++ /dev/null @@ -1,451 +0,0 @@ -apiVersion: 
apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[TechPreviewNoUpgrade] ClusterImagePolicy" -crd: 0000_10_config-operator_01_clusterimagepolicy-TechPreviewNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ImagePolicy with policyType PublicKey - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Should be able to create a minimal ImagePolicy with policyType FulcioCAWithRekor - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - fulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - fulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - - name: Should not allow policyType PublicKey but not set publicKey - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - FulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": publicKey is required when policyType is PublicKey, and forbidden otherwise" - - name: Should not allow policyType FulcioCAData but not set fulcioCAWithRekor - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - PublicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" - - name: Should not allow policyType set but not set corresponding policy - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": publicKey is required when policyType is PublicKey, and forbidden otherwise" - - name: Should not allow policyType set FulcioCAWith but not set corresponding policy - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" - - name: Should not allow signedIdentity matchPolicy ExactRepository but not set repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - 
rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - expectedError: "spec.policy.signedIdentity: Invalid value: \"object\": exactRepository is required when matchPolicy is ExactRepository, and forbidden otherwise" - - name: Should not allow signedIdentity matchPolicy RemapIdentity but not set prefixes - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - expectedError: "spec.policy.signedIdentity: Invalid value: \"object\": remapIdentity is required when matchPolicy is RemapIdentity, and forbidden otherwise" - - name: Test scope should not allow 'busybox' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - busybox - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid image scope format, scope must contain a fully qualified domain name or 'localhost'" - - name: Test scope should not allow start with subnamesapces '*.example.com/test' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - "*.example.com/test" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid image scope with wildcard, a wildcard can only be at the start of the domain and is only supported for subdomain matching, not path matching" - - name: Test scope should not allow invalid digest - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:12dsdf - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid repository namespace or image specification in the image scope" - - name: Test should not allow tag in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com/namespace/namespace:latest - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow tag in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: localhost:1234/namespace/namespace:latest - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - 
name: Test should not allow digest in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: localhost:1234/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow tag in prefix/signedPrefix - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com/namespace:latest - signedPrefix: example.com/namespace - expectedError: "[spec.policy.signedIdentity.remapIdentity.prefix: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.remapIdentity.prefix: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should allow valid ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com - - name: Test should allow valid signedIdentity prefix/signedPrefix - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com - signedPrefix: mirror.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com - signedPrefix: mirror.com - - name: Test scope should allow localhost name with port 'localhost:1234/namespace/namespace' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - localhost:1234/namespace/namespace - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - localhost:1234/namespace/namespace - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow localhost 'localhost/foo/bar' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy 
- spec: - scopes: - - localhost/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - localhost/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow 'example.com/foo/bar' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow tag 'example.com/foo/bar:latest' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/foo/bar:latest - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/foo/bar:latest - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow full specification digest - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow '*.example.com' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - "*.example.com" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ClusterImagePolicy - spec: - scopes: - - "*.example.com" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== diff --git a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.imagepolicy.testsuite.yaml b/vendor/github.com/openshift/api/config/v1alpha1/techpreview.imagepolicy.testsuite.yaml deleted file mode 100644 index b469d4c52..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.imagepolicy.testsuite.yaml +++ /dev/null @@ -1,451 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[TechPreviewNoUpgrade] ImagePolicy" -crd: 0000_10_config-operator_01_imagepolicy-TechPreviewNoUpgrade.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal ImagePolicy with policyType PublicKey - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Should be able to create a 
minimal ImagePolicy with policyType FulcioCAWithRekor - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - fulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - fulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - - name: Should not allow policyType PublicKey but not set publicKey - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - FulcioCAWithRekor: - fulcioCAData: Zm9vIGJhcg== - rekorKeyData: Zm9vIGJhcg== - fulcioSubject: - oidcIssuer: https://oidc.localhost - signedEmail: test-user@example.com - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": publicKey is required when policyType is PublicKey, and forbidden otherwise" - - name: Should not allow policyType FulcioCAData but not set fulcioCAWithRekor - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - PublicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" - - name: Should not allow policyType set but not set corresponding policy - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": publicKey is required when policyType is PublicKey, and forbidden otherwise" - - name: Should not allow policyType set FulcioCAWith but not set corresponding policy - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: FulcioCAWithRekor - expectedError: "spec.policy.rootOfTrust: Invalid value: \"object\": fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" - - name: Should not allow signedIdentity matchPolicy ExactRepository but not set repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - expectedError: "spec.policy.signedIdentity: Invalid value: \"object\": exactRepository is required when matchPolicy is ExactRepository, and forbidden otherwise" - - name: Should not allow signedIdentity matchPolicy RemapIdentity but not set prefixes - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - expectedError: "spec.policy.signedIdentity: Invalid value: \"object\": remapIdentity is required when matchPolicy is RemapIdentity, and forbidden otherwise" - - name: Test scope should not allow 
'busybox' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - busybox - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid image scope format, scope must contain a fully qualified domain name or 'localhost'" - - name: Test scope should not allow start with subnamesapces '*.example.com/test' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - "*.example.com/test" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid image scope with wildcard, a wildcard can only be at the start of the domain and is only supported for subdomain matching, not path matching" - - name: Test scope should not allow invalid digest - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:12dsdf - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expectedError: "spec.scopes[0]: Invalid value: \"string\": invalid repository namespace or image specification in the image scope" - - name: Test should not allow tag in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com/namespace/namespace:latest - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow tag in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: localhost:1234/namespace/namespace:latest - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow digest in ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: localhost:1234/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - expectedError: "[spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.exactRepository.repository: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should not allow tag in prefix/signedPrefix - initial: | - 
apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com/namespace:latest - signedPrefix: example.com/namespace - expectedError: "[spec.policy.signedIdentity.remapIdentity.prefix: Invalid value: \"string\": invalid repository or prefix in the signedIdentity, should not include the tag or digest, spec.policy.signedIdentity.remapIdentity.prefix: Invalid value: \"string\": invalid repository or prefix in the signedIdentity]" - - name: Test should allow valid ExactRepository repository - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: ExactRepository - exactRepository: - repository: example.com - - name: Test should allow valid signedIdentity prefix/signedPrefix - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com - signedPrefix: mirror.com - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - signedIdentity: - matchPolicy: RemapIdentity - remapIdentity: - prefix: example.com - signedPrefix: mirror.com - - name: Test scope should allow localhost name with port 'localhost:1234/namespace/namespace' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - localhost:1234/namespace/namespace - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - localhost:1234/namespace/namespace - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow localhost 'localhost/foo/bar' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - localhost/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - localhost/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow 'example.com/foo/bar' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/foo/bar - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow tag 'example.com/foo/bar:latest' - initial: | - apiVersion: 
config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/foo/bar:latest - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/foo/bar:latest - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow full specification digest - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - example.com/namespace/namespace@sha256:b7e686e30346e9ace664fa09c0275262f8b9a443ed56d22165a0e201f6488c13 - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - - name: Test scope should allow '*.example.com' - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - "*.example.com" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: ImagePolicy - spec: - scopes: - - "*.example.com" - policy: - rootOfTrust: - policyType: PublicKey - publicKey: - keyData: Zm9vIGJhcg== diff --git a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.insightsdatagather.testsuite.yaml b/vendor/github.com/openshift/api/config/v1alpha1/techpreview.insightsdatagather.testsuite.yaml deleted file mode 100644 index f73792738..000000000 --- a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.insightsdatagather.testsuite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] InsightsDataGather" -crd: 0000_10_config-operator_01_insightsdatagather.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal InsightsDataGather - initial: | - apiVersion: config.openshift.io/v1alpha1 - kind: InsightsDataGather - spec: {} # No spec is required for a InsightsDataGather - expected: | - apiVersion: config.openshift.io/v1alpha1 - kind: InsightsDataGather - spec: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go index 9af55b540..65eb5c1f7 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go @@ -9,6 +9,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Backup provides configuration for performing backups of the openshift cluster. // // Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
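The testsuite files deleted above exercised CEL validation rules compiled into the CRDs; the `expectedError` strings ("publicKey is required when policyType is PublicKey, and forbidden otherwise") are the `message` fields of those rules. A sketch of the marker shape that produces such an error, not part of this patch and with illustrative type and rule text:

```go
package v1alpha1

// PolicyRootOfTrust is an illustrative stand-in for the union validated by the
// deleted tests: the CEL rule requires publicKey exactly when policyType is
// PublicKey, and the message matches the expectedError asserted above.
//
// +kubebuilder:validation:XValidation:rule="has(self.publicKey) == (self.policyType == 'PublicKey')",message="publicKey is required when policyType is PublicKey, and forbidden otherwise"
type PolicyRootOfTrust struct {
	// policyType selects the root of trust for signature verification.
	// +kubebuilder:validation:Enum=PublicKey;FulcioCAWithRekor
	PolicyType string `json:"policyType"`

	// publicKey is only valid when policyType is PublicKey.
	// +optional
	PublicKey *PublicKey `json:"publicKey,omitempty"`
}

// PublicKey holds the verification key material (base64 keyData in the tests).
type PublicKey struct {
	KeyData []byte `json:"keyData"`
}
```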
+// +kubebuilder:object:root=true +// +kubebuilder:resource:path=backups,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1482 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=AutomatedEtcdBackup // +openshift:compatibility-gen:level=4 type Backup struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go index 5c44e0e74..c503fdeab 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go @@ -9,6 +9,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // ClusterImagePolicy holds cluster-wide configuration for image signature verification // // Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=clusterimagepolicies,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1457 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=ImagePolicy // +openshift:compatibility-gen:level=4 type ClusterImagePolicy struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go index e9ec90902..247bab218 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go @@ -8,6 +8,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // ImagePolicy holds namespace-wide configuration for image signature verification // // Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=imagepolicies,scope=Namespaced +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1457 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=ImagePolicy // +openshift:compatibility-gen:level=4 type ImagePolicy struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go index 4dcdb2ec4..171e96d5b 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go @@ -9,6 +9,12 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // InsightsDataGather provides data gather configuration options for the the Insights Operator. // // Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
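These `+kubebuilder`/`+openshift` markers carry the metadata that previously lived in the hand-maintained CRD YAML and testsuite files deleted above: resource path, scope, status subresource, API-approval PR, output file pattern, and gating feature gate are now all derived from the Go type. A minimal sketch of the layout on a hypothetical type, with all names illustrative:

```go
package v1alpha1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// Widget is a hypothetical cluster-scoped resource, shown only to illustrate
// the marker layout adopted by the types in this patch.
//
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=widgets,scope=Cluster
// +kubebuilder:subresource:status
// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
// +openshift:enable:FeatureGate=WidgetFeature
type Widget struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   WidgetSpec   `json:"spec"`
	Status WidgetStatus `json:"status,omitempty"`
}

// WidgetSpec and WidgetStatus are empty placeholders to keep the sketch compilable.
type WidgetSpec struct{}
type WidgetStatus struct{}
```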
+// +kubebuilder:object:root=true +// +kubebuilder:resource:path=insightsdatagathers,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1245 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=InsightsConfig // +openshift:compatibility-gen:level=4 type InsightsDataGather struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 000000000..9b5744d4a --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,92 @@ +backups.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1482 + CRDName: backups.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - AutomatedEtcdBackup + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: Backup + Labels: {} + PluralName: backups + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - AutomatedEtcdBackup + Version: v1alpha1 + +clusterimagepolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1457 + CRDName: clusterimagepolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ImagePolicy + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterImagePolicy + Labels: {} + PluralName: clusterimagepolicies + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - ImagePolicy + Version: v1alpha1 + +imagepolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1457 + CRDName: imagepolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ImagePolicy + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImagePolicy + Labels: {} + PluralName: imagepolicies + PrinterColumns: [] + Scope: Namespaced + ShortNames: null + TopLevelFeatureGates: + - ImagePolicy + Version: v1alpha1 + +insightsdatagathers.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1245 + CRDName: insightsdatagathers.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - InsightsConfig + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: InsightsDataGather + Labels: {} + PluralName: insightsdatagathers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - InsightsConfig + Version: v1alpha1 + diff --git a/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml b/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml deleted file mode 100644 index a533efbc1..000000000 --- a/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml +++ /dev/null @@ -1,365 +0,0 @@ -apiVersion: 
apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: securitycontextconstraints.security.openshift.io -spec: - group: security.openshift.io - names: - kind: SecurityContextConstraints - listKind: SecurityContextConstraintsList - plural: securitycontextconstraints - singular: securitycontextconstraints - scope: Cluster - versions: - - additionalPrinterColumns: - - description: Determines if a container can request to be run as privileged - jsonPath: .allowPrivilegedContainer - name: Priv - type: string - - description: A list of capabilities that can be requested to add to the container - jsonPath: .allowedCapabilities - name: Caps - type: string - - description: Strategy that will dictate what labels will be set in the SecurityContext - jsonPath: .seLinuxContext.type - name: SELinux - type: string - - description: Strategy that will dictate what RunAsUser is used in the SecurityContext - jsonPath: .runAsUser.type - name: RunAsUser - type: string - - description: Strategy that will dictate what fs group is used by the SecurityContext - jsonPath: .fsGroup.type - name: FSGroup - type: string - - description: Strategy that will dictate what supplemental groups are used by - the SecurityContext - jsonPath: .supplementalGroups.type - name: SupGroup - type: string - - description: Sort order of SCCs - jsonPath: .priority - name: Priority - type: string - - description: Force containers to run with a read only root file system - jsonPath: .readOnlyRootFilesystem - name: ReadOnlyRootFS - type: string - - description: White list of allowed volume plugins - jsonPath: .volumes - name: Volumes - type: string - name: v1 - schema: - openAPIV3Schema: - description: "SecurityContextConstraints governs the ability to make requests - that affect the SecurityContext that will be applied to a container. For - historical reasons SCC was exposed under the core Kubernetes API group. - That exposure is deprecated and will be removed in a future release - users - should instead use the security.openshift.io group to manage SecurityContextConstraints. - \n Compatibility level 1: Stable within a major release for a minimum of - 12 months or 3 minor releases (whichever is longer)." - properties: - allowHostDirVolumePlugin: - description: AllowHostDirVolumePlugin determines if the policy allow containers - to use the HostDir volume plugin - type: boolean - allowHostIPC: - description: AllowHostIPC determines if the policy allows host ipc in - the containers. - type: boolean - allowHostNetwork: - description: AllowHostNetwork determines if the policy allows the use - of HostNetwork in the pod spec. - type: boolean - allowHostPID: - description: AllowHostPID determines if the policy allows host pid in - the containers. - type: boolean - allowHostPorts: - description: AllowHostPorts determines if the policy allows host ports - in the containers. - type: boolean - allowPrivilegeEscalation: - description: AllowPrivilegeEscalation determines if a pod can request - to allow privilege escalation. If unspecified, defaults to true. - nullable: true - type: boolean - allowPrivilegedContainer: - description: AllowPrivilegedContainer determines if a container can request - to be run as privileged. 
- type: boolean - allowedCapabilities: - description: AllowedCapabilities is a list of capabilities that can be - requested to add to the container. Capabilities in this field maybe - added at the pod author's discretion. You must not list a capability - in both AllowedCapabilities and RequiredDropCapabilities. To allow all - capabilities you may use '*'. - items: - description: Capability represent POSIX capabilities type - type: string - nullable: true - type: array - allowedFlexVolumes: - description: AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty - or nil indicates that all Flexvolumes may be used. This parameter is - effective only when the usage of the Flexvolumes is allowed in the "Volumes" - field. - items: - description: AllowedFlexVolume represents a single Flexvolume that is - allowed to be used. - properties: - driver: - description: Driver is the name of the Flexvolume driver. - type: string - required: - - driver - type: object - nullable: true - type: array - allowedUnsafeSysctls: - description: "AllowedUnsafeSysctls is a list of explicitly allowed unsafe - sysctls, defaults to none. Each entry is either a plain sysctl name - or ends in \"*\" in which case it is considered as a prefix of allowed - sysctls. Single * means all unsafe sysctls are allowed. Kubelet has - to whitelist all allowed unsafe sysctls explicitly to avoid rejection. - \n Examples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. - \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc." - items: - type: string - nullable: true - type: array - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - defaultAddCapabilities: - description: DefaultAddCapabilities is the default set of capabilities - that will be added to the container unless the pod spec specifically - drops the capability. You may not list a capabiility in both DefaultAddCapabilities - and RequiredDropCapabilities. - items: - description: Capability represent POSIX capabilities type - type: string - nullable: true - type: array - defaultAllowPrivilegeEscalation: - description: DefaultAllowPrivilegeEscalation controls the default setting - for whether a process can gain more privileges than its parent process. - nullable: true - type: boolean - forbiddenSysctls: - description: "ForbiddenSysctls is a list of explicitly forbidden sysctls, - defaults to none. Each entry is either a plain sysctl name or ends in - \"*\" in which case it is considered as a prefix of forbidden sysctls. - Single * means all sysctls are forbidden. \n Examples: e.g. \"foo/*\" - forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", - \"foo.baz\", etc." - items: - type: string - nullable: true - type: array - fsGroup: - description: FSGroup is the strategy that will dictate what fs group is - used by the SecurityContext. - nullable: true - properties: - ranges: - description: Ranges are the allowed ranges of fs groups. If you would - like to force a single fs group then supply a single range with - the same start and end. - items: - description: 'IDRange provides a min/max of an allowed range of - IDs. TODO: this could be reused for UIDs.' - properties: - max: - description: Max is the end of the range, inclusive. 
- format: int64 - type: integer - min: - description: Min is the start of the range, inclusive. - format: int64 - type: integer - type: object - type: array - type: - description: Type is the strategy that will dictate what FSGroup is - used in the SecurityContext. - type: string - type: object - groups: - description: The groups that have permission to use this security context - constraints - items: - type: string - nullable: true - type: array - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - priority: - description: Priority influences the sort order of SCCs when evaluating - which SCCs to try first for a given pod request based on access in the - Users and Groups fields. The higher the int, the higher priority. An - unset value is considered a 0 priority. If scores for multiple SCCs - are equal they will be sorted from most restrictive to least restrictive. - If both priorities and restrictions are equal the SCCs will be sorted - by name. - format: int32 - nullable: true - type: integer - readOnlyRootFilesystem: - description: ReadOnlyRootFilesystem when set to true will force containers - to run with a read only root file system. If the container specifically - requests to run with a non-read only root file system the SCC should - deny the pod. If set to false the container may run with a read only - root file system if it wishes but it will not be forced to. - type: boolean - requiredDropCapabilities: - description: RequiredDropCapabilities are the capabilities that will be - dropped from the container. These are required to be dropped and cannot - be added. - items: - description: Capability represent POSIX capabilities type - type: string - nullable: true - type: array - runAsUser: - description: RunAsUser is the strategy that will dictate what RunAsUser - is used in the SecurityContext. - nullable: true - properties: - type: - description: Type is the strategy that will dictate what RunAsUser - is used in the SecurityContext. - type: string - uid: - description: UID is the user id that containers must run as. Required - for the MustRunAs strategy if not using namespace/service account - allocated uids. - format: int64 - type: integer - uidRangeMax: - description: UIDRangeMax defines the max value for a strategy that - allocates by range. - format: int64 - type: integer - uidRangeMin: - description: UIDRangeMin defines the min value for a strategy that - allocates by range. - format: int64 - type: integer - type: object - seLinuxContext: - description: SELinuxContext is the strategy that will dictate what labels - will be set in the SecurityContext. - nullable: true - properties: - seLinuxOptions: - description: seLinuxOptions required to run as; required for MustRunAs - properties: - level: - description: Level is SELinux level label that applies to the - container. - type: string - role: - description: Role is a SELinux role label that applies to the - container. - type: string - type: - description: Type is a SELinux type label that applies to the - container. - type: string - user: - description: User is a SELinux user label that applies to the - container. 
- type: string - type: object - type: - description: Type is the strategy that will dictate what SELinux context - is used in the SecurityContext. - type: string - type: object - seccompProfiles: - description: "SeccompProfiles lists the allowed profiles that may be set - for the pod or container's seccomp annotations. An unset (nil) or empty - value means that no profiles may be specifid by the pod or container.\tThe - wildcard '*' may be used to allow all profiles. When used to generate - a value for a pod the first non-wildcard profile will be used as the - default." - items: - type: string - nullable: true - type: array - supplementalGroups: - description: SupplementalGroups is the strategy that will dictate what - supplemental groups are used by the SecurityContext. - nullable: true - properties: - ranges: - description: Ranges are the allowed ranges of supplemental groups. If - you would like to force a single supplemental group then supply - a single range with the same start and end. - items: - description: 'IDRange provides a min/max of an allowed range of - IDs. TODO: this could be reused for UIDs.' - properties: - max: - description: Max is the end of the range, inclusive. - format: int64 - type: integer - min: - description: Min is the start of the range, inclusive. - format: int64 - type: integer - type: object - type: array - type: - description: Type is the strategy that will dictate what supplemental - groups is used in the SecurityContext. - type: string - type: object - users: - description: The users who have permissions to use this security context - constraints - items: - type: string - nullable: true - type: array - volumes: - description: Volumes is a white list of allowed volume plugins. FSType - corresponds directly with the field names of a VolumeSource (azureFile, - configMap, emptyDir). To allow all volumes you may use "*". To allow - no volumes, set to ["none"]. - items: - description: FS Type gives strong typing to different file systems that - are used by volumes. - type: string - nullable: true - type: array - required: - - allowHostDirVolumePlugin - - allowHostIPC - - allowHostNetwork - - allowHostPID - - allowHostPorts - - allowPrivilegedContainer - - allowedCapabilities - - defaultAddCapabilities - - priority - - readOnlyRootFilesystem - - requiredDropCapabilities - - volumes - type: object - served: true - storage: true diff --git a/vendor/github.com/openshift/api/security/v1/generated.proto b/vendor/github.com/openshift/api/security/v1/generated.proto index d842079a0..c6d60915d 100644 --- a/vendor/github.com/openshift/api/security/v1/generated.proto +++ b/vendor/github.com/openshift/api/security/v1/generated.proto @@ -195,15 +195,19 @@ message SELinuxContextStrategyOptions { // SecurityContextConstraints. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
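For orientation, the `required` list at the end of the manifest deleted just above corresponds to the non-optional fields of the Go type in this same vendored package; a minimal in-code SCC satisfying it looks roughly like the following sketch (field values mirror the "minimal SecurityContextConstraints" fixture, the object name is illustrative):

```go
package example

import (
	securityv1 "github.com/openshift/api/security/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// minimalSCC sets every field in the deleted CRD's required list explicitly;
// priority is a nullable int32 in the schema, hence the pointer.
func minimalSCC() *securityv1.SecurityContextConstraints {
	priority := int32(0)
	return &securityv1.SecurityContextConstraints{
		ObjectMeta:               metav1.ObjectMeta{Name: "minimal"},
		AllowHostDirVolumePlugin: false,
		AllowHostIPC:             false,
		AllowHostNetwork:         false,
		AllowHostPID:             false,
		AllowHostPorts:           false,
		AllowPrivilegedContainer: false,
		AllowedCapabilities:      []corev1.Capability{},
		DefaultAddCapabilities:   []corev1.Capability{},
		Priority:                 &priority,
		ReadOnlyRootFilesystem:   false,
		RequiredDropCapabilities: []corev1.Capability{},
		Volumes:                  []securityv1.FSType{},
	}
}
```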
-// +kubebuilder:printcolumn:name="Priv",type=string,JSONPath=`.allowPrivilegedContainer`,description="Determines if a container can request to be run as privileged" -// +kubebuilder:printcolumn:name="Caps",type=string,JSONPath=`.allowedCapabilities`,description="A list of capabilities that can be requested to add to the container" -// +kubebuilder:printcolumn:name="SELinux",type=string,JSONPath=`.seLinuxContext.type`,description="Strategy that will dictate what labels will be set in the SecurityContext" -// +kubebuilder:printcolumn:name="RunAsUser",type=string,JSONPath=`.runAsUser.type`,description="Strategy that will dictate what RunAsUser is used in the SecurityContext" -// +kubebuilder:printcolumn:name="FSGroup",type=string,JSONPath=`.fsGroup.type`,description="Strategy that will dictate what fs group is used by the SecurityContext" -// +kubebuilder:printcolumn:name="SupGroup",type=string,JSONPath=`.supplementalGroups.type`,description="Strategy that will dictate what supplemental groups are used by the SecurityContext" -// +kubebuilder:printcolumn:name="Priority",type=string,JSONPath=`.priority`,description="Sort order of SCCs" -// +kubebuilder:printcolumn:name="ReadOnlyRootFS",type=string,JSONPath=`.readOnlyRootFilesystem`,description="Force containers to run with a read only root file system" -// +kubebuilder:printcolumn:name="Volumes",type=string,JSONPath=`.volumes`,description="White list of allowed volume plugins" +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=securitycontextconstraints,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:printcolumn:name="Priv",type=string,JSONPath=.allowPrivilegedContainer,description="Determines if a container can request to be run as privileged" +// +kubebuilder:printcolumn:name="Caps",type=string,JSONPath=.allowedCapabilities,description="A list of capabilities that can be requested to add to the container" +// +kubebuilder:printcolumn:name="SELinux",type=string,JSONPath=.seLinuxContext.type,description="Strategy that will dictate what labels will be set in the SecurityContext" +// +kubebuilder:printcolumn:name="RunAsUser",type=string,JSONPath=.runAsUser.type,description="Strategy that will dictate what RunAsUser is used in the SecurityContext" +// +kubebuilder:printcolumn:name="FSGroup",type=string,JSONPath=.fsGroup.type,description="Strategy that will dictate what fs group is used by the SecurityContext" +// +kubebuilder:printcolumn:name="SupGroup",type=string,JSONPath=.supplementalGroups.type,description="Strategy that will dictate what supplemental groups are used by the SecurityContext" +// +kubebuilder:printcolumn:name="Priority",type=string,JSONPath=.priority,description="Sort order of SCCs" +// +kubebuilder:printcolumn:name="ReadOnlyRootFS",type=string,JSONPath=.readOnlyRootFilesystem,description="Force containers to run with a read only root file system" +// +kubebuilder:printcolumn:name="Volumes",type=string,JSONPath=.volumes,description="White list of allowed volume plugins" // +kubebuilder:singular=securitycontextconstraint // +openshift:compatibility-gen:level=1 message SecurityContextConstraints { diff --git a/vendor/github.com/openshift/api/security/v1/stable.securitycontextconstraints.testsuite.yaml b/vendor/github.com/openshift/api/security/v1/stable.securitycontextconstraints.testsuite.yaml deleted file mode 100644 index d663b94c2..000000000 --- 
a/vendor/github.com/openshift/api/security/v1/stable.securitycontextconstraints.testsuite.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[Stable] SecurityContextConstraints" -crd: 0000_03_security-openshift_01_scc.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal SecurityContextConstraints - initial: | - apiVersion: security.openshift.io/v1 - kind: SecurityContextConstraints - allowHostDirVolumePlugin: false - allowHostIPC: false - allowHostNetwork: false - allowHostPID: false - allowHostPorts: false - allowPrivilegedContainer: false - allowedCapabilities: [] - defaultAddCapabilities: [] - priority: 0 - readOnlyRootFilesystem: false - requiredDropCapabilities: [] - volumes: [] - expected: | - apiVersion: security.openshift.io/v1 - kind: SecurityContextConstraints - allowHostDirVolumePlugin: false - allowHostIPC: false - allowHostNetwork: false - allowHostPID: false - allowHostPorts: false - allowPrivilegedContainer: false - allowedCapabilities: [] - defaultAddCapabilities: [] - priority: 0 - readOnlyRootFilesystem: false - requiredDropCapabilities: [] - volumes: [] diff --git a/vendor/github.com/openshift/api/security/v1/types.go b/vendor/github.com/openshift/api/security/v1/types.go index 3e208210c..b57da3058 100644 --- a/vendor/github.com/openshift/api/security/v1/types.go +++ b/vendor/github.com/openshift/api/security/v1/types.go @@ -22,15 +22,19 @@ var AllowAllCapabilities corev1.Capability = "*" // SecurityContextConstraints. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -// +kubebuilder:printcolumn:name="Priv",type=string,JSONPath=`.allowPrivilegedContainer`,description="Determines if a container can request to be run as privileged" -// +kubebuilder:printcolumn:name="Caps",type=string,JSONPath=`.allowedCapabilities`,description="A list of capabilities that can be requested to add to the container" -// +kubebuilder:printcolumn:name="SELinux",type=string,JSONPath=`.seLinuxContext.type`,description="Strategy that will dictate what labels will be set in the SecurityContext" -// +kubebuilder:printcolumn:name="RunAsUser",type=string,JSONPath=`.runAsUser.type`,description="Strategy that will dictate what RunAsUser is used in the SecurityContext" -// +kubebuilder:printcolumn:name="FSGroup",type=string,JSONPath=`.fsGroup.type`,description="Strategy that will dictate what fs group is used by the SecurityContext" -// +kubebuilder:printcolumn:name="SupGroup",type=string,JSONPath=`.supplementalGroups.type`,description="Strategy that will dictate what supplemental groups are used by the SecurityContext" -// +kubebuilder:printcolumn:name="Priority",type=string,JSONPath=`.priority`,description="Sort order of SCCs" -// +kubebuilder:printcolumn:name="ReadOnlyRootFS",type=string,JSONPath=`.readOnlyRootFilesystem`,description="Force containers to run with a read only root file system" -// +kubebuilder:printcolumn:name="Volumes",type=string,JSONPath=`.volumes`,description="White list of allowed volume plugins" +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=securitycontextconstraints,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 +// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:printcolumn:name="Priv",type=string,JSONPath=.allowPrivilegedContainer,description="Determines if a 
container can request to be run as privileged" +// +kubebuilder:printcolumn:name="Caps",type=string,JSONPath=.allowedCapabilities,description="A list of capabilities that can be requested to add to the container" +// +kubebuilder:printcolumn:name="SELinux",type=string,JSONPath=.seLinuxContext.type,description="Strategy that will dictate what labels will be set in the SecurityContext" +// +kubebuilder:printcolumn:name="RunAsUser",type=string,JSONPath=.runAsUser.type,description="Strategy that will dictate what RunAsUser is used in the SecurityContext" +// +kubebuilder:printcolumn:name="FSGroup",type=string,JSONPath=.fsGroup.type,description="Strategy that will dictate what fs group is used by the SecurityContext" +// +kubebuilder:printcolumn:name="SupGroup",type=string,JSONPath=.supplementalGroups.type,description="Strategy that will dictate what supplemental groups are used by the SecurityContext" +// +kubebuilder:printcolumn:name="Priority",type=string,JSONPath=.priority,description="Sort order of SCCs" +// +kubebuilder:printcolumn:name="ReadOnlyRootFS",type=string,JSONPath=.readOnlyRootFilesystem,description="Force containers to run with a read only root file system" +// +kubebuilder:printcolumn:name="Volumes",type=string,JSONPath=.volumes,description="White list of allowed volume plugins" // +kubebuilder:singular=securitycontextconstraint // +openshift:compatibility-gen:level=1 type SecurityContextConstraints struct { diff --git a/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 000000000..ea3967ab2 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,58 @@ +securitycontextconstraints.security.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/470 + CRDName: securitycontextconstraints.security.openshift.io + Capability: "" + Category: "" + FeatureGates: [] + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_03" + GroupName: security.openshift.io + HasStatus: false + KindName: SecurityContextConstraints + Labels: {} + PluralName: securitycontextconstraints + PrinterColumns: + - description: Determines if a container can request to be run as privileged + jsonPath: .allowPrivilegedContainer + name: Priv + type: string + - description: A list of capabilities that can be requested to add to the container + jsonPath: .allowedCapabilities + name: Caps + type: string + - description: Strategy that will dictate what labels will be set in the SecurityContext + jsonPath: .seLinuxContext.type + name: SELinux + type: string + - description: Strategy that will dictate what RunAsUser is used in the SecurityContext + jsonPath: .runAsUser.type + name: RunAsUser + type: string + - description: Strategy that will dictate what fs group is used by the SecurityContext + jsonPath: .fsGroup.type + name: FSGroup + type: string + - description: Strategy that will dictate what supplemental groups are used by the + SecurityContext + jsonPath: .supplementalGroups.type + name: SupGroup + type: string + - description: Sort order of SCCs + jsonPath: .priority + name: Priority + type: string + - description: Force containers to run with a read only root file system + jsonPath: .readOnlyRootFilesystem + name: ReadOnlyRootFS + type: string + - description: White list of allowed volume plugins + jsonPath: .volumes + 
name: Volumes + type: string + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: [] + Version: v1 + diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go new file mode 100644 index 000000000..93866293e --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// NetworkDiagnosticsApplyConfiguration represents an declarative configuration of the NetworkDiagnostics type for use +// with apply. +type NetworkDiagnosticsApplyConfiguration struct { + Mode *v1.NetworkDiagnosticsMode `json:"mode,omitempty"` + SourcePlacement *NetworkDiagnosticsSourcePlacementApplyConfiguration `json:"sourcePlacement,omitempty"` + TargetPlacement *NetworkDiagnosticsTargetPlacementApplyConfiguration `json:"targetPlacement,omitempty"` +} + +// NetworkDiagnosticsApplyConfiguration constructs an declarative configuration of the NetworkDiagnostics type for use with +// apply. +func NetworkDiagnostics() *NetworkDiagnosticsApplyConfiguration { + return &NetworkDiagnosticsApplyConfiguration{} +} + +// WithMode sets the Mode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Mode field is set to the value of the last call. +func (b *NetworkDiagnosticsApplyConfiguration) WithMode(value v1.NetworkDiagnosticsMode) *NetworkDiagnosticsApplyConfiguration { + b.Mode = &value + return b +} + +// WithSourcePlacement sets the SourcePlacement field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SourcePlacement field is set to the value of the last call. +func (b *NetworkDiagnosticsApplyConfiguration) WithSourcePlacement(value *NetworkDiagnosticsSourcePlacementApplyConfiguration) *NetworkDiagnosticsApplyConfiguration { + b.SourcePlacement = value + return b +} + +// WithTargetPlacement sets the TargetPlacement field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TargetPlacement field is set to the value of the last call. +func (b *NetworkDiagnosticsApplyConfiguration) WithTargetPlacement(value *NetworkDiagnosticsTargetPlacementApplyConfiguration) *NetworkDiagnosticsApplyConfiguration { + b.TargetPlacement = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go new file mode 100644 index 000000000..efe6bbd49 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go @@ -0,0 +1,44 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// NetworkDiagnosticsSourcePlacementApplyConfiguration represents an declarative configuration of the NetworkDiagnosticsSourcePlacement type for use +// with apply. 
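A usage sketch for the builder above, chaining the generated With* methods (the mode string and node selector are illustrative; the placement builder is added in the next file of this patch). Because only explicitly set fields are serialized, the result is suitable for server-side apply:

```go
package example

import (
	configv1 "github.com/openshift/api/config/v1"
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// networkDiagnostics assembles a NetworkDiagnostics apply configuration;
// each With* call sets one field and returns the receiver for chaining.
func networkDiagnostics() *applyconfigv1.NetworkDiagnosticsApplyConfiguration {
	return applyconfigv1.NetworkDiagnostics().
		WithMode(configv1.NetworkDiagnosticsMode("All")). // mode value illustrative
		WithSourcePlacement(
			applyconfigv1.NetworkDiagnosticsSourcePlacement().
				WithNodeSelector(map[string]string{"kubernetes.io/os": "linux"}),
		)
}
```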
+type NetworkDiagnosticsSourcePlacementApplyConfiguration struct { + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Tolerations []v1.Toleration `json:"tolerations,omitempty"` +} + +// NetworkDiagnosticsSourcePlacementApplyConfiguration constructs an declarative configuration of the NetworkDiagnosticsSourcePlacement type for use with +// apply. +func NetworkDiagnosticsSourcePlacement() *NetworkDiagnosticsSourcePlacementApplyConfiguration { + return &NetworkDiagnosticsSourcePlacementApplyConfiguration{} +} + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting an existing map entries in NodeSelector field with the same key. +func (b *NetworkDiagnosticsSourcePlacementApplyConfiguration) WithNodeSelector(entries map[string]string) *NetworkDiagnosticsSourcePlacementApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. +func (b *NetworkDiagnosticsSourcePlacementApplyConfiguration) WithTolerations(values ...v1.Toleration) *NetworkDiagnosticsSourcePlacementApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go new file mode 100644 index 000000000..c1ce2d8e9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go @@ -0,0 +1,44 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// NetworkDiagnosticsTargetPlacementApplyConfiguration represents an declarative configuration of the NetworkDiagnosticsTargetPlacement type for use +// with apply. +type NetworkDiagnosticsTargetPlacementApplyConfiguration struct { + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Tolerations []v1.Toleration `json:"tolerations,omitempty"` +} + +// NetworkDiagnosticsTargetPlacementApplyConfiguration constructs an declarative configuration of the NetworkDiagnosticsTargetPlacement type for use with +// apply. +func NetworkDiagnosticsTargetPlacement() *NetworkDiagnosticsTargetPlacementApplyConfiguration { + return &NetworkDiagnosticsTargetPlacementApplyConfiguration{} +} + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting an existing map entries in NodeSelector field with the same key. 
+func (b *NetworkDiagnosticsTargetPlacementApplyConfiguration) WithNodeSelector(entries map[string]string) *NetworkDiagnosticsTargetPlacementApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. +func (b *NetworkDiagnosticsTargetPlacementApplyConfiguration) WithTolerations(values ...v1.Toleration) *NetworkDiagnosticsTargetPlacementApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkspec.go index c74dc4d0c..5d218d02c 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkspec.go @@ -10,6 +10,7 @@ type NetworkSpecApplyConfiguration struct { NetworkType *string `json:"networkType,omitempty"` ExternalIP *ExternalIPConfigApplyConfiguration `json:"externalIP,omitempty"` ServiceNodePortRange *string `json:"serviceNodePortRange,omitempty"` + NetworkDiagnostics *NetworkDiagnosticsApplyConfiguration `json:"networkDiagnostics,omitempty"` } // NetworkSpecApplyConfiguration constructs an declarative configuration of the NetworkSpec type for use with @@ -64,3 +65,11 @@ func (b *NetworkSpecApplyConfiguration) WithServiceNodePortRange(value string) * b.ServiceNodePortRange = &value return b } + +// WithNetworkDiagnostics sets the NetworkDiagnostics field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NetworkDiagnostics field is set to the value of the last call. 
+func (b *NetworkSpecApplyConfiguration) WithNetworkDiagnostics(value *NetworkDiagnosticsApplyConfiguration) *NetworkSpecApplyConfiguration { + b.NetworkDiagnostics = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go index 64d6d2ae7..28f42faf9 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go @@ -2022,6 +2022,49 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.config.v1.NetworkStatus default: {} +- name: com.github.openshift.api.config.v1.NetworkDiagnostics + map: + fields: + - name: mode + type: + scalar: string + default: "" + - name: sourcePlacement + type: + namedType: com.github.openshift.api.config.v1.NetworkDiagnosticsSourcePlacement + default: {} + - name: targetPlacement + type: + namedType: com.github.openshift.api.config.v1.NetworkDiagnosticsTargetPlacement + default: {} +- name: com.github.openshift.api.config.v1.NetworkDiagnosticsSourcePlacement + map: + fields: + - name: nodeSelector + type: + map: + elementType: + scalar: string + - name: tolerations + type: + list: + elementType: + namedType: io.k8s.api.core.v1.Toleration + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.NetworkDiagnosticsTargetPlacement + map: + fields: + - name: nodeSelector + type: + map: + elementType: + scalar: string + - name: tolerations + type: + list: + elementType: + namedType: io.k8s.api.core.v1.Toleration + elementRelationship: atomic - name: com.github.openshift.api.config.v1.NetworkMigration map: fields: @@ -2043,6 +2086,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: externalIP type: namedType: com.github.openshift.api.config.v1.ExternalIPConfig + - name: networkDiagnostics + type: + namedType: com.github.openshift.api.config.v1.NetworkDiagnostics + default: {} - name: networkType type: scalar: string diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go index ef8b2c8f2..5e9aa84c0 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go @@ -7,6 +7,7 @@ import ( "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ) @@ -140,6 +141,35 @@ type GrpcPodConfig struct { // +kubebuilder:validation:Enum=legacy;restricted // +kubebuilder:default:=legacy SecurityContextConfig SecurityConfig `json:"securityContextConfig,omitempty"` + + // MemoryTarget configures the $GOMEMLIMIT value for the gRPC catalog Pod. This is a soft memory limit for the server, + // which the runtime will attempt to meet but makes no guarantees that it will do so. 
If this value is set, the Pod
+	// will have the following modifications made to the container running the server:
+	// - the $GOMEMLIMIT environment variable will be set to this value in bytes
+	// - the memory request will be set to this value
+	//
+	// This field should be set if it's desired to reduce the footprint of a catalog server as much as possible, or if
+	// a catalog being served is very large and needs more than the default allocation. If your index image has a file-
+	// system cache, determine a good approximation for this value by doubling the size of the package cache at
+	// /tmp/cache/cache/packages.json in the index image.
+	//
+	// This field is best-effort; if unset, no default will be used and no Pod memory limit or $GOMEMLIMIT value will be set.
+	// +optional
+	MemoryTarget *resource.Quantity `json:"memoryTarget,omitempty"`
+
+	// ExtractContent configures the gRPC catalog Pod to extract catalog metadata from the provided index image and
+	// use a well-known version of the `opm` server to expose it. The catalog index image that this CatalogSource is
+	// configured to use *must* be using the file-based catalogs in order to utilize this feature.
+	// +optional
+	ExtractContent *ExtractContentConfig `json:"extractContent,omitempty"`
+}
+
+// ExtractContentConfig configures content extraction from a file-based catalog index image.
+type ExtractContentConfig struct {
+	// CacheDir is the directory storing the pre-calculated API cache.
+	CacheDir string `json:"cacheDir"`
+	// CatalogDir is the directory storing the file-based catalog contents.
+	CatalogDir string `json:"catalogDir"`
+}
 
 // UpdateStrategy holds all the different types of catalog source update strategies
@@ -209,9 +239,12 @@ type CatalogSourceStatus struct {
 	// The last time the CatalogSource image registry has been polled to ensure the image is up-to-date
 	LatestImageRegistryPoll *metav1.Time `json:"latestImageRegistryPoll,omitempty"`
 
-	ConfigMapResource     *ConfigMapResourceReference `json:"configMapReference,omitempty"`
-	RegistryServiceStatus *RegistryServiceStatus      `json:"registryService,omitempty"`
-	GRPCConnectionState   *GRPCConnectionState        `json:"connectionState,omitempty"`
+	// ConfigMapReference (deprecated) is the reference to the ConfigMap containing the catalog source's configuration, when the catalog source is a ConfigMap
+	ConfigMapResource *ConfigMapResourceReference `json:"configMapReference,omitempty"`
+	// RegistryService represents the current state of the GRPC service used to serve the catalog
+	RegistryServiceStatus *RegistryServiceStatus `json:"registryService,omitempty"`
+	// ConnectionState represents the current state of the CatalogSource's connection to the registry
+	GRPCConnectionState *GRPCConnectionState `json:"connectionState,omitempty"`
 
 	// Represents the state of a CatalogSource. Note that Message and Reason represent the original
 	// status information, which may be migrated to be conditions based in the future.
Any new features
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go
index ffc357b12..a4c8d1746 100644
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go
@@ -120,12 +120,19 @@ func (c *ClusterServiceVersion) IsObsolete() bool {
 // IsCopied returns true if the CSV has been copied and false otherwise.
 func (c *ClusterServiceVersion) IsCopied() bool {
-	operatorNamespace, ok := c.GetAnnotations()[OperatorGroupNamespaceAnnotationKey]
-	if c.Status.Reason == CSVReasonCopied || ok && c.GetNamespace() != operatorNamespace {
-		return true
+	return c.Status.Reason == CSVReasonCopied || IsCopied(c)
+}
+
+func IsCopied(o metav1.Object) bool {
+	annotations := o.GetAnnotations()
+	if annotations != nil {
+		operatorNamespace, ok := annotations[OperatorGroupNamespaceAnnotationKey]
+		if ok && o.GetNamespace() != operatorNamespace {
+			return true
+		}
 	}
 
-	if labels := c.GetLabels(); labels != nil {
+	if labels := o.GetLabels(); labels != nil {
 		if _, ok := labels[CopiedLabelKey]; ok {
 			return true
 		}
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go
index 2452f9a1c..292fedf9b 100644
--- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go
+++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go
@@ -90,6 +90,13 @@ type SubscriptionConfig struct {
 	// Use empty object ({}) to erase original sub-attribute values.
 	// +optional
 	Affinity *corev1.Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
+
+	// Annotations is an unstructured key value map stored with each Deployment, Pod, APIService in the Operator.
+	// Typically, annotations may be set by external tools to store and retrieve arbitrary metadata.
+	// Use this field to pre-define annotations that OLM should add to each of the Subscription's
+	// deployments, pods, and apiservices.
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
 }
 
 // SubscriptionConditionType indicates an explicit state condition about a Subscription in "abnormal-true"
@@ -117,6 +124,19 @@ const (
 
 	// SubscriptionBundleUnpackFailed indicates that the unpack job failed
 	SubscriptionBundleUnpackFailed SubscriptionConditionType = "BundleUnpackFailed"
+
+	// SubscriptionDeprecated is a roll-up condition which indicates that the Operator currently installed with this Subscription
+	// has been deprecated. It will be present when any of the three deprecation types (Package, Channel, Bundle) are present.
+	SubscriptionDeprecated SubscriptionConditionType = "Deprecated"
+
+	// SubscriptionPackageDeprecated indicates that the Package currently installed with this Subscription has been deprecated.
+	SubscriptionPackageDeprecated SubscriptionConditionType = "PackageDeprecated"
+
+	// SubscriptionChannelDeprecated indicates that the Channel used with this Subscription has been deprecated.
+	SubscriptionChannelDeprecated SubscriptionConditionType = "ChannelDeprecated"
+
+	// SubscriptionBundleDeprecated indicates that the Bundle currently installed with this Subscription has been deprecated.
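// Illustrative sketch, not part of the patch: populating the new GrpcPodConfig
// fields from the catalogsource_types.go hunk above. The quantity and the
// directory paths are hypothetical example values.
package main

import (
	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func exampleGrpcPodConfig() *operatorsv1alpha1.GrpcPodConfig {
	// Becomes both $GOMEMLIMIT (in bytes) and the container memory request.
	memoryTarget := resource.MustParse("512Mi")
	return &operatorsv1alpha1.GrpcPodConfig{
		MemoryTarget: &memoryTarget,
		// Requires a file-based catalog index image.
		ExtractContent: &operatorsv1alpha1.ExtractContentConfig{
			CacheDir:   "/tmp/cache",
			CatalogDir: "/configs",
		},
	}
}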
+ SubscriptionBundleDeprecated SubscriptionConditionType = "BundleDeprecated" ) const ( diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go index cf01001f5..684a7432a 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* @@ -690,6 +689,21 @@ func (in *DependentStatus) DeepCopy() *DependentStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtractContentConfig) DeepCopyInto(out *ExtractContentConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtractContentConfig. +func (in *ExtractContentConfig) DeepCopy() *ExtractContentConfig { + if in == nil { + return nil + } + out := new(ExtractContentConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GRPCConnectionState) DeepCopyInto(out *GRPCConnectionState) { *out = *in @@ -733,6 +747,16 @@ func (in *GrpcPodConfig) DeepCopyInto(out *GrpcPodConfig) { *out = new(string) **out = **in } + if in.MemoryTarget != nil { + in, out := &in.MemoryTarget, &out.MemoryTarget + x := (*in).DeepCopy() + *out = &x + } + if in.ExtractContent != nil { + in, out := &in.ExtractContent, &out.ExtractContent + *out = new(ExtractContentConfig) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrpcPodConfig. @@ -1404,6 +1428,13 @@ func (in *SubscriptionConfig) DeepCopyInto(out *SubscriptionConfig) { *out = new(v1.Affinity) (*in).DeepCopyInto(*out) } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionConfig. diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go index a9914fb1a..6f4298483 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/register.go @@ -14,6 +14,12 @@ package monitoring -const ( +// GroupName is set to var instead of const, since this provides the ability for clients importing the module - +// github.com/prometheus-operator/prometheus-operator/pkg/apis to manage the operator's objects in a different +// API group +// +// Use `ldflags` in the client side, e.g.: +// go run -ldflags="-s -X github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring.GroupName=monitoring.example.com" ./example/client/. 
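// Illustrative sketch, not part of the patch: checking the roll-up Deprecated
// condition defined above by scanning Subscription.Status.Conditions; the
// helper name is hypothetical.
package main

import (
	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

func subscriptionIsDeprecated(sub *operatorsv1alpha1.Subscription) bool {
	for _, cond := range sub.Status.Conditions {
		// SubscriptionDeprecated is set when any of PackageDeprecated,
		// ChannelDeprecated or BundleDeprecated is present.
		if cond.Type == operatorsv1alpha1.SubscriptionDeprecated && cond.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}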
+var ( GroupName = "monitoring.coreos.com" ) diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/resource.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/resource.go new file mode 100644 index 000000000..25736ce92 --- /dev/null +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/resource.go @@ -0,0 +1,60 @@ +// Copyright 2018 The prometheus-operator Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monitoring + +import ( + "fmt" +) + +const ( + PrometheusesKind = "Prometheus" + PrometheusName = "prometheuses" + + AlertmanagersKind = "Alertmanager" + AlertmanagerName = "alertmanagers" + + ServiceMonitorsKind = "ServiceMonitor" + ServiceMonitorName = "servicemonitors" + + PodMonitorsKind = "PodMonitor" + PodMonitorName = "podmonitors" + + PrometheusRuleKind = "PrometheusRule" + PrometheusRuleName = "prometheusrules" + + ProbesKind = "Probe" + ProbeName = "probes" + + ScrapeConfigsKind = "ScrapeConfig" + ScrapeConfigName = "scrapeconfigs" +) + +var resourceToKindMap = map[string]string{ + PrometheusName: PrometheusesKind, + AlertmanagerName: AlertmanagersKind, + ServiceMonitorName: ServiceMonitorsKind, + PodMonitorName: PodMonitorsKind, + PrometheusRuleName: PrometheusRuleKind, + ProbeName: ProbesKind, + ScrapeConfigName: ScrapeConfigsKind, +} + +func ResourceToKind(s string) string { + kind, found := resourceToKindMap[s] + if !found { + panic(fmt.Sprintf("failed to map resource %q to a kind", s)) + } + return kind +} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go index 8f44c9d31..f99dedf43 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go @@ -60,7 +60,15 @@ func (l *Alertmanager) DeepCopyObject() runtime.Object { // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +k8s:openapi-gen=true type AlertmanagerSpec struct { - // PodMetadata configures Labels and Annotations which are propagated to the alertmanager pods. + // PodMetadata configures labels and annotations which are propagated to the Alertmanager pods. + // + // The following items are reserved and cannot be overridden: + // * "alertmanager" label, set to the name of the Alertmanager instance. + // * "app.kubernetes.io/instance" label, set to the name of the Alertmanager instance. + // * "app.kubernetes.io/managed-by" label, set to "prometheus-operator". + // * "app.kubernetes.io/name" label, set to "alertmanager". + // * "app.kubernetes.io/version" label, set to the Alertmanager version. + // * "kubectl.kubernetes.io/default-container" annotation, set to "alertmanager". 
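// Illustrative sketch, not part of the patch: the new ResourceToKind helper
// shown above maps a lowercase resource name to its kind and panics for
// unknown input, so callers should stick to the exported *Name constants.
package main

import (
	"fmt"

	"github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring"
)

func main() {
	fmt.Println(monitoring.ResourceToKind(monitoring.PrometheusName))   // "Prometheus"
	fmt.Println(monitoring.ResourceToKind(monitoring.ScrapeConfigName)) // "ScrapeConfig"
}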
PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` // Image if specified has precedence over baseImage, tag and sha // combinations. Specifying the version is still necessary to ensure the @@ -75,17 +83,15 @@ type AlertmanagerSpec struct { Version string `json:"version,omitempty"` // Tag of Alertmanager container image to be deployed. Defaults to the value of `version`. // Version is ignored if Tag is set. - // Deprecated: use 'image' instead. The image tag can be specified - // as part of the image URL. + // Deprecated: use 'image' instead. The image tag can be specified as part of the image URL. Tag string `json:"tag,omitempty"` // SHA of Alertmanager container image to be deployed. Defaults to the value of `version`. // Similar to a tag, but the SHA explicitly deploys an immutable container image. // Version and Tag are ignored if SHA is set. - // Deprecated: use 'image' instead. The image digest can be specified - // as part of the image URL. + // Deprecated: use 'image' instead. The image digest can be specified as part of the image URL. SHA string `json:"sha,omitempty"` // Base image that is used to deploy pods, without tag. - // Deprecated: use 'image' instead + // Deprecated: use 'image' instead. BaseImage string `json:"baseImage,omitempty"` // An optional list of references to secrets in the same namespace // to use for pulling prometheus and alertmanager images from registries @@ -115,10 +121,10 @@ type AlertmanagerSpec struct { // receiver (effectively dropping alert notifications). ConfigSecret string `json:"configSecret,omitempty"` // Log level for Alertmanager to be configured with. - //+kubebuilder:validation:Enum="";debug;info;warn;error + // +kubebuilder:validation:Enum="";debug;info;warn;error LogLevel string `json:"logLevel,omitempty"` // Log format for Alertmanager to be configured with. - //+kubebuilder:validation:Enum="";logfmt;json + // +kubebuilder:validation:Enum="";logfmt;json LogFormat string `json:"logFormat,omitempty"` // Size is the expected size of the alertmanager cluster. The controller will // eventually make the size of the running cluster equal to the expected @@ -200,6 +206,9 @@ type AlertmanagerSpec struct { ClusterAdvertiseAddress string `json:"clusterAdvertiseAddress,omitempty"` // Interval between gossip attempts. ClusterGossipInterval GoDuration `json:"clusterGossipInterval,omitempty"` + // Defines the identifier that uniquely identifies the Alertmanager cluster. + // You should only set it when the Alertmanager cluster includes Alertmanager instances which are external to this Alertmanager resource. In practice, the addresses of the external instances are provided via the `.spec.additionalPeers` field. + ClusterLabel *string `json:"clusterLabel,omitempty"` // Interval between pushpull attempts. ClusterPushpullInterval GoDuration `json:"clusterPushpullInterval,omitempty"` // Timeout for cluster peering. @@ -231,10 +240,27 @@ type AlertmanagerSpec struct { HostAliases []HostAlias `json:"hostAliases,omitempty"` // Defines the web command line flags when starting Alertmanager. Web *AlertmanagerWebSpec `json:"web,omitempty"` - // EXPERIMENTAL: alertmanagerConfiguration specifies the configuration of Alertmanager. + // alertmanagerConfiguration specifies the configuration of Alertmanager. + // // If defined, it takes precedence over the `configSecret` field. - // This field may change in future releases. + // + // This is an *experimental feature*, it may change in any upcoming release + // in a breaking way. 
+ // + //+optional AlertmanagerConfiguration *AlertmanagerConfiguration `json:"alertmanagerConfiguration,omitempty"` + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod. + // If the service account has `automountServiceAccountToken: true`, set the field to `false` to opt out of automounting API credentials. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` + // Enable access to Alertmanager feature flags. By default, no features are enabled. + // Enabling features which are disabled by default is entirely outside the + // scope of what the maintainers will support and by doing so, you accept + // that this behaviour may break at any time without notice. + // + // It requires Alertmanager >= 0.27.0. + // +optional + EnableFeatures []string `json:"enableFeatures,omitempty"` } // AlertmanagerConfigMatcherStrategy defines the strategy used by AlertmanagerConfig objects to match alerts. @@ -266,6 +292,10 @@ type AlertmanagerConfiguration struct { // AlertmanagerGlobalConfig configures parameters that are valid in all other configuration contexts. // See https://prometheus.io/docs/alerting/latest/configuration/#configuration-file type AlertmanagerGlobalConfig struct { + // Configures global SMTP parameters. + // +optional + SMTPConfig *GlobalSMTPConfig `json:"smtp,omitempty"` + // ResolveTimeout is the default value used by alertmanager if the alert does // not include EndsAt, after this time passes it can declare the alert as resolved if it has not been updated. // This has no impact on alerts from Prometheus, as they always include EndsAt. @@ -282,6 +312,9 @@ type AlertmanagerGlobalConfig struct { // The default OpsGenie API Key. OpsGenieAPIKey *v1.SecretKeySelector `json:"opsGenieApiKey,omitempty"` + + // The default Pagerduty URL. + PagerdutyURL *string `json:"pagerdutyUrl,omitempty"` } // AlertmanagerStatus is the most recent observed status of the Alertmanager cluster. Read-only. @@ -336,6 +369,53 @@ type AlertmanagerWebSpec struct { Timeout *uint32 `json:"timeout,omitempty"` } +// GlobalSMTPConfig configures global SMTP parameters. +// See https://prometheus.io/docs/alerting/latest/configuration/#configuration-file +type GlobalSMTPConfig struct { + // The default SMTP From header field. + // +optional + From *string `json:"from,omitempty"` + + // The default SMTP smarthost used for sending emails. + // +optional + SmartHost *HostPort `json:"smartHost,omitempty"` + + // The default hostname to identify to the SMTP server. + // +optional + Hello *string `json:"hello,omitempty"` + + // SMTP Auth using CRAM-MD5, LOGIN and PLAIN. If empty, Alertmanager doesn't authenticate to the SMTP server. + // +optional + AuthUsername *string `json:"authUsername,omitempty"` + + // SMTP Auth using LOGIN and PLAIN. + // +optional + AuthPassword *v1.SecretKeySelector `json:"authPassword,omitempty"` + + // SMTP Auth using PLAIN + // +optional + AuthIdentity *string `json:"authIdentity,omitempty"` + + // SMTP Auth using CRAM-MD5. + // +optional + AuthSecret *v1.SecretKeySelector `json:"authSecret,omitempty"` + + // The default SMTP TLS requirement. + // Note that Go does not support unencrypted connections to remote SMTP endpoints. + // +optional + RequireTLS *bool `json:"requireTLS,omitempty"` +} + +// HostPort represents a "host:port" network address. +type HostPort struct { + // Defines the host's address, it can be a DNS name or a literal IP address. 
+ // +kubebuilder:validation:MinLength=1 + Host string `json:"host"` + // Defines the host's port, it can be a literal port number or a port name. + // +kubebuilder:validation:MinLength=1 + Port string `json:"port"` +} + // HTTPConfig defines a client HTTP configuration. // See https://prometheus.io/docs/alerting/latest/configuration/#http_config type HTTPConfig struct { diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go index 630959919..c6a78fd09 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go @@ -48,31 +48,102 @@ func (l *PodMonitor) DeepCopyObject() runtime.Object { // +k8s:openapi-gen=true type PodMonitorSpec struct { // The label to use to retrieve the job name from. + // `jobLabel` selects the label from the associated Kubernetes `Pod` + // object which will be used as the `job` label for all metrics. + // + // For example if `jobLabel` is set to `foo` and the Kubernetes `Pod` + // object is labeled with `foo: bar`, then Prometheus adds the `job="bar"` + // label to all ingested metrics. + // + // If the value of this field is empty, the `job` label of the metrics + // defaults to the namespace and name of the PodMonitor object (e.g. `/`). JobLabel string `json:"jobLabel,omitempty"` - // PodTargetLabels transfers labels on the Kubernetes Pod onto the target. + + // `podTargetLabels` defines the labels which are transferred from the + // associated Kubernetes `Pod` object onto the ingested metrics. + // PodTargetLabels []string `json:"podTargetLabels,omitempty"` - // A list of endpoints allowed as part of this PodMonitor. + + // List of endpoints part of this PodMonitor. + // + // +optional PodMetricsEndpoints []PodMetricsEndpoint `json:"podMetricsEndpoints"` - // Selector to select Pod objects. + + // Label selector to select the Kubernetes `Pod` objects. Selector metav1.LabelSelector `json:"selector"` - // Selector to select which namespaces the Endpoints objects are discovered from. + // Selector to select which namespaces the Kubernetes `Pods` objects + // are discovered from. NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` - // SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - SampleLimit uint64 `json:"sampleLimit,omitempty"` - // TargetLimit defines a limit on the number of scraped targets that will be accepted. - TargetLimit uint64 `json:"targetLimit,omitempty"` + + // `sampleLimit` defines a per-scrape limit on the number of scraped samples + // that will be accepted. + // + // +optional + SampleLimit *uint64 `json:"sampleLimit,omitempty"` + + // `targetLimit` defines a limit on the number of scraped targets that will + // be accepted. + // + // +optional + TargetLimit *uint64 `json:"targetLimit,omitempty"` + + // `scrapeProtocols` defines the protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` + // Per-scrape limit on number of labels that will be accepted for a sample. 
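// Illustrative sketch, not part of the patch: wiring the new global SMTP
// settings together with the HostPort type defined above. The host, port and
// from address are hypothetical example values.
package main

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

func exampleGlobalConfig() *monitoringv1.AlertmanagerGlobalConfig {
	from := "alertmanager@example.com"
	requireTLS := true
	return &monitoringv1.AlertmanagerGlobalConfig{
		SMTPConfig: &monitoringv1.GlobalSMTPConfig{
			From:       &from,
			SmartHost:  &monitoringv1.HostPort{Host: "smtp.example.com", Port: "587"},
			RequireTLS: &requireTLS,
		},
	}
}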
- // Only valid in Prometheus versions 2.27.0 and newer. - LabelLimit uint64 `json:"labelLimit,omitempty"` + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelLimit *uint64 `json:"labelLimit,omitempty"` // Per-scrape limit on length of labels name that will be accepted for a sample. - // Only valid in Prometheus versions 2.27.0 and newer. - LabelNameLengthLimit uint64 `json:"labelNameLengthLimit,omitempty"` + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` // Per-scrape limit on length of labels value that will be accepted for a sample. - // Only valid in Prometheus versions 2.27.0 and newer. - LabelValueLengthLimit uint64 `json:"labelValueLengthLimit,omitempty"` - // Attaches node metadata to discovered targets. - // Requires Prometheus v2.35.0 and above. + // + // It requires Prometheus >= v2.27.0. + // + // +optional + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + // Per-scrape limit on the number of targets dropped by relabeling + // that will be kept in memory. 0 means no limit. + // + // It requires Prometheus >= v2.47.0. + // + // +optional + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + + // `attachMetadata` defines additional metadata which is added to the + // discovered targets. + // + // It requires Prometheus >= v2.37.0. + // + // +optional AttachMetadata *AttachMetadata `json:"attachMetadata,omitempty"` + + // The scrape class to apply. + // +optional + // +kubebuilder:validation:MinLength=1 + ScrapeClassName *string `json:"scrapeClass,omitempty"` + + // When defined, bodySizeLimit specifies a job level limit on the size + // of uncompressed response body that will be accepted by Prometheus. + // + // It requires Prometheus >= v2.28.0. + // + // +optional + BodySizeLimit *ByteSize `json:"bodySizeLimit,omitempty"` } // PodMonitorList is a list of PodMonitors. @@ -91,66 +162,151 @@ func (l *PodMonitorList) DeepCopyObject() runtime.Object { return l.DeepCopy() } -// PodMetricsEndpoint defines a scrapeable endpoint of a Kubernetes Pod serving Prometheus metrics. +// PodMetricsEndpoint defines an endpoint serving Prometheus metrics to be scraped by +// Prometheus. +// // +k8s:openapi-gen=true type PodMetricsEndpoint struct { - // Name of the pod port this endpoint refers to. Mutually exclusive with targetPort. + // Name of the Pod port which this endpoint refers to. + // + // It takes precedence over `targetPort`. Port string `json:"port,omitempty"` - // Deprecated: Use 'port' instead. + + // Name or number of the target port of the `Pod` object behind the Service, the + // port must be specified with container port property. + // + // Deprecated: use 'port' instead. TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` - // HTTP path to scrape for metrics. + + // HTTP path from which to scrape for metrics. + // // If empty, Prometheus uses the default value (e.g. `/metrics`). Path string `json:"path,omitempty"` + // HTTP scheme to use for scraping. - // `http` and `https` are the expected values unless you rewrite the `__scheme__` label via relabeling. + // + // `http` and `https` are the expected values unless you rewrite the + // `__scheme__` label via relabeling. + // // If empty, Prometheus uses the default value `http`. + // // +kubebuilder:validation:Enum=http;https Scheme string `json:"scheme,omitempty"` - // Optional HTTP URL parameters + + // `params` define optional HTTP URL parameters. 
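// Illustrative sketch, not part of the patch: the limit fields above moved from
// uint64 to *uint64 so that "unset" (nil, fall back to Prometheus defaults) is
// distinguishable from an explicitly chosen value.
package main

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

func exampleLimits() monitoringv1.PodMonitorSpec {
	sampleLimit := uint64(10000)
	return monitoringv1.PodMonitorSpec{
		SampleLimit: &sampleLimit, // explicit per-scrape cap of 10000 samples
		TargetLimit: nil,          // unset: Prometheus applies its default behaviour
	}
}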
Params map[string][]string `json:"params,omitempty"` - // Interval at which metrics should be scraped - // If not specified Prometheus' global scrape interval is used. + + // Interval at which Prometheus scrapes the metrics from the target. + // + // If empty, Prometheus uses the global scrape interval. Interval Duration `json:"interval,omitempty"` - // Timeout after which the scrape is ended - // If not specified, the Prometheus global scrape interval is used. + + // Timeout after which Prometheus considers the scrape to be failed. + // + // If empty, Prometheus uses the global scrape timeout unless it is less + // than the target's scrape interval value in which the latter is used. ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` - // TLS configuration to use when scraping the endpoint. - TLSConfig *PodMetricsEndpointTLSConfig `json:"tlsConfig,omitempty"` - // Secret to mount to read bearer token for scraping targets. The secret - // needs to be in the same namespace as the pod monitor and accessible by - // the Prometheus Operator. + + // TLS configuration to use when scraping the target. + // + // +optional + TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` + + // `bearerTokenSecret` specifies a key of a Secret containing the bearer + // token for scraping targets. The secret needs to be in the same namespace + // as the PodMonitor object and readable by the Prometheus Operator. + // + // +optional + // + // Deprecated: use `authorization` instead. BearerTokenSecret v1.SecretKeySelector `json:"bearerTokenSecret,omitempty"` - // HonorLabels chooses the metric's labels on collisions with target labels. + + // When true, `honorLabels` preserves the metric's labels when they collide + // with the target's labels. HonorLabels bool `json:"honorLabels,omitempty"` - // HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data. + + // `honorTimestamps` controls whether Prometheus preserves the timestamps + // when exposed by the target. + // + // +optional HonorTimestamps *bool `json:"honorTimestamps,omitempty"` - // BasicAuth allow an endpoint to authenticate over basic authentication. - // More info: https://prometheus.io/docs/operating/configuration/#endpoint + + // `trackTimestampsStaleness` defines whether Prometheus tracks staleness of + // the metrics that have an explicit timestamp present in scraped data. + // Has no effect if `honorTimestamps` is false. + // + // It requires Prometheus >= v2.48.0. + // + // +optional + TrackTimestampsStaleness *bool `json:"trackTimestampsStaleness,omitempty"` + + // `basicAuth` configures the Basic Authentication credentials to use when + // scraping the target. + // + // Cannot be set at the same time as `authorization`, or `oauth2`. + // + // +optional BasicAuth *BasicAuth `json:"basicAuth,omitempty"` - // OAuth2 for the URL. Only valid in Prometheus versions 2.27.0 and newer. + + // `oauth2` configures the OAuth2 settings to use when scraping the target. + // + // It requires Prometheus >= 2.27.0. + // + // Cannot be set at the same time as `authorization`, or `basicAuth`. + // + // +optional OAuth2 *OAuth2 `json:"oauth2,omitempty"` - // Authorization section for this endpoint + + // `authorization` configures the Authorization header credentials to use when + // scraping the target. + // + // Cannot be set at the same time as `basicAuth`, or `oauth2`. + // + // +optional Authorization *SafeAuthorization `json:"authorization,omitempty"` - // MetricRelabelConfigs to apply to samples before ingestion. 
+ + // `metricRelabelings` configures the relabeling rules to apply to the + // samples before ingestion. + // + // +optional MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"` - // RelabelConfigs to apply to samples before scraping. - // Prometheus Operator automatically adds relabelings for a few standard Kubernetes fields. + + // `relabelings` configures the relabeling rules to apply the target's + // metadata labels. + // + // The Operator automatically adds relabelings for a few standard Kubernetes fields. + // // The original scrape job's name is available via the `__tmp_prometheus_job_name` label. + // // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + // + // +optional RelabelConfigs []*RelabelConfig `json:"relabelings,omitempty"` - // ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint. + + // `proxyURL` configures the HTTP Proxy URL (e.g. + // "http://proxyserver:2195") to go through when scraping the target. + // + // +optional ProxyURL *string `json:"proxyUrl,omitempty"` - // FollowRedirects configures whether scrape requests follow HTTP 3xx redirects. + + // `followRedirects` defines whether the scrape requests should follow HTTP + // 3xx redirects. + // + // +optional FollowRedirects *bool `json:"followRedirects,omitempty"` - // Whether to enable HTTP2. + + // `enableHttp2` can be used to disable HTTP2 when scraping the target. + // + // +optional EnableHttp2 *bool `json:"enableHttp2,omitempty"` - // Drop pods that are not running. (Failed, Succeeded). Enabled by default. + + // When true, the pods which are not running (e.g. either in Failed or + // Succeeded state) are dropped during the target discovery. + // + // If unset, the filtering is enabled. + // // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase + // + // +optional FilterRunning *bool `json:"filterRunning,omitempty"` } - -// PodMetricsEndpointTLSConfig specifies TLS configuration parameters. -// +k8s:openapi-gen=true -type PodMetricsEndpointTLSConfig struct { - SafeTLSConfig `json:",inline"` -} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go index 46c107705..e740cee08 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go @@ -64,7 +64,7 @@ type ProbeSpec struct { // If not specified, the Prometheus global scrape timeout is used. ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` // TLS configuration to use when scraping the endpoint. - TLSConfig *ProbeTLSConfig `json:"tlsConfig,omitempty"` + TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"` // Secret to mount to read bearer token for scraping targets. The secret // needs to be in the same namespace as the probe and accessible by // the Prometheus Operator. @@ -79,18 +79,45 @@ type ProbeSpec struct { // Authorization section for this endpoint Authorization *SafeAuthorization `json:"authorization,omitempty"` // SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - SampleLimit uint64 `json:"sampleLimit,omitempty"` + // +optional + SampleLimit *uint64 `json:"sampleLimit,omitempty"` // TargetLimit defines a limit on the number of scraped targets that will be accepted. 
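// Illustrative sketch, not part of the patch: since bearerTokenSecret is now
// deprecated above, the equivalent endpoint credentials can be expressed via
// `authorization`. The Secret name and key are hypothetical example values.
package main

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	corev1 "k8s.io/api/core/v1"
)

func exampleEndpoint() monitoringv1.PodMetricsEndpoint {
	return monitoringv1.PodMetricsEndpoint{
		Port: "metrics",
		Authorization: &monitoringv1.SafeAuthorization{
			Credentials: &corev1.SecretKeySelector{
				LocalObjectReference: corev1.LocalObjectReference{Name: "scrape-token"},
				Key:                  "token",
			},
		},
	}
}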
- TargetLimit uint64 `json:"targetLimit,omitempty"` + // +optional + TargetLimit *uint64 `json:"targetLimit,omitempty"` + // `scrapeProtocols` defines the protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` // Per-scrape limit on number of labels that will be accepted for a sample. // Only valid in Prometheus versions 2.27.0 and newer. - LabelLimit uint64 `json:"labelLimit,omitempty"` + // +optional + LabelLimit *uint64 `json:"labelLimit,omitempty"` // Per-scrape limit on length of labels name that will be accepted for a sample. // Only valid in Prometheus versions 2.27.0 and newer. - LabelNameLengthLimit uint64 `json:"labelNameLengthLimit,omitempty"` + // +optional + LabelNameLengthLimit *uint64 `json:"labelNameLengthLimit,omitempty"` // Per-scrape limit on length of labels value that will be accepted for a sample. // Only valid in Prometheus versions 2.27.0 and newer. - LabelValueLengthLimit uint64 `json:"labelValueLengthLimit,omitempty"` + // +optional + LabelValueLengthLimit *uint64 `json:"labelValueLengthLimit,omitempty"` + // Per-scrape limit on the number of targets dropped by relabeling + // that will be kept in memory. 0 means no limit. + // + // It requires Prometheus >= v2.47.0. + // + // +optional + KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"` + + // The scrape class to apply. + // +optional + // +kubebuilder:validation:MinLength=1 + ScrapeClassName *string `json:"scrapeClass,omitempty"` } // ProbeTargets defines how to discover the probed targets. @@ -193,9 +220,3 @@ type ProbeList struct { func (l *ProbeList) DeepCopyObject() runtime.Object { return l.DeepCopy() } - -// ProbeTLSConfig specifies TLS configuration parameters for the prober. -// +k8s:openapi-gen=true -type ProbeTLSConfig struct { - SafeTLSConfig `json:",inline"` -} diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go index 49b18c284..4953225cb 100644 --- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go +++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go @@ -17,9 +17,12 @@ package v1 import ( "strings" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -29,15 +32,29 @@ const ( PrometheusKindKey = "prometheus" ) +// ScrapeProtocol represents a protocol used by Prometheus for scraping metrics. +// Supported values are: +// * `OpenMetricsText0.0.1` +// * `OpenMetricsText1.0.0` +// * `PrometheusProto` +// * `PrometheusText0.0.4` +// +kubebuilder:validation:Enum=PrometheusProto;OpenMetricsText0.0.1;OpenMetricsText1.0.0;PrometheusText0.0.4 +type ScrapeProtocol string + // PrometheusInterface is used by Prometheus and PrometheusAgent to share common methods, e.g. config generation. 
// +k8s:deepcopy-gen=false type PrometheusInterface interface { metav1.ObjectMetaAccessor - GetTypeMeta() metav1.TypeMeta + schema.ObjectKind + GetCommonPrometheusFields() CommonPrometheusFields SetCommonPrometheusFields(CommonPrometheusFields) + + GetStatus() PrometheusStatus } +var _ = PrometheusInterface(&Prometheus{}) + func (l *Prometheus) GetCommonPrometheusFields() CommonPrometheusFields { return l.Spec.CommonPrometheusFields } @@ -46,164 +63,293 @@ func (l *Prometheus) SetCommonPrometheusFields(f CommonPrometheusFields) { l.Spec.CommonPrometheusFields = f } -func (l *Prometheus) GetTypeMeta() metav1.TypeMeta { - return l.TypeMeta +func (l *Prometheus) GetStatus() PrometheusStatus { + return l.Status +} + +// +kubebuilder:validation:Enum=OnResource;OnShard +type AdditionalLabelSelectors string + +const ( + // Automatically add a label selector that will select all pods matching the same Prometheus/PrometheusAgent resource (irrespective of their shards). + ResourceNameLabelSelector AdditionalLabelSelectors = "OnResource" + + // Automatically add a label selector that will select all pods matching the same shard. + ShardAndResourceNameLabelSelector AdditionalLabelSelectors = "OnShard" +) + +type CoreV1TopologySpreadConstraint v1.TopologySpreadConstraint + +type TopologySpreadConstraint struct { + CoreV1TopologySpreadConstraint `json:",inline"` + + //+optional + // Defines what Prometheus Operator managed labels should be added to labelSelector on the topologySpreadConstraint. + AdditionalLabelSelectors *AdditionalLabelSelectors `json:"additionalLabelSelectors,omitempty"` } // CommonPrometheusFields are the options available to both the Prometheus server and agent. // +k8s:deepcopy-gen=true type CommonPrometheusFields struct { - // PodMetadata configures Labels and Annotations which are propagated to the prometheus pods. + // PodMetadata configures labels and annotations which are propagated to the Prometheus pods. + // + // The following items are reserved and cannot be overridden: + // * "prometheus" label, set to the name of the Prometheus object. + // * "app.kubernetes.io/instance" label, set to the name of the Prometheus object. + // * "app.kubernetes.io/managed-by" label, set to "prometheus-operator". + // * "app.kubernetes.io/name" label, set to "prometheus". + // * "app.kubernetes.io/version" label, set to the Prometheus version. + // * "operator.prometheus.io/name" label, set to the name of the Prometheus object. + // * "operator.prometheus.io/shard" label, set to the shard number of the Prometheus object. + // * "kubectl.kubernetes.io/default-container" annotation, set to "prometheus". PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` - // ServiceMonitors to be selected for target discovery. + + // ServiceMonitors to be selected for target discovery. An empty label + // selector matches all objects. A null label selector matches no objects. // // If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector` // and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged. // The Prometheus operator will ensure that the Prometheus configuration's // Secret exists, but it is the responsibility of the user to provide the raw // gzipped Prometheus configuration under the `prometheus.yaml.gz` key. - // This behavior is deprecated and will be removed in the next major version + // This behavior is *deprecated* and will be removed in the next major version // of the custom resource definition. 
It is recommended to use
 	// `spec.additionalScrapeConfigs` instead.
 	ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"`
-	// Namespace's labels to match for ServiceMonitor discovery. If nil, only
-	// check own namespace.
+	// Namespaces to match for ServiceMonitors discovery. An empty label selector
+	// matches all namespaces. A null label selector matches the current
+	// namespace only.
 	ServiceMonitorNamespaceSelector *metav1.LabelSelector `json:"serviceMonitorNamespaceSelector,omitempty"`
-	// *Experimental* PodMonitors to be selected for target discovery.
+
+	// PodMonitors to be selected for target discovery. An empty label selector
+	// matches all objects. A null label selector matches no objects.
 	//
 	// If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector`
 	// and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged.
 	// The Prometheus operator will ensure that the Prometheus configuration's
 	// Secret exists, but it is the responsibility of the user to provide the raw
 	// gzipped Prometheus configuration under the `prometheus.yaml.gz` key.
-	// This behavior is deprecated and will be removed in the next major version
+	// This behavior is *deprecated* and will be removed in the next major version
 	// of the custom resource definition. It is recommended to use
 	// `spec.additionalScrapeConfigs` instead.
 	PodMonitorSelector *metav1.LabelSelector `json:"podMonitorSelector,omitempty"`
-	// Namespace's labels to match for PodMonitor discovery. If nil, only
-	// check own namespace.
+	// Namespaces to match for PodMonitors discovery. An empty label selector
+	// matches all namespaces. A null label selector matches the current
+	// namespace only.
 	PodMonitorNamespaceSelector *metav1.LabelSelector `json:"podMonitorNamespaceSelector,omitempty"`
-	// *Experimental* Probes to be selected for target discovery.
+
+	// Probes to be selected for target discovery. An empty label selector
+	// matches all objects. A null label selector matches no objects.
 	//
 	// If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector`
 	// and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged.
// The Prometheus operator will ensure that the Prometheus configuration's // Secret exists, but it is the responsibility of the user to provide the raw // gzipped Prometheus configuration under the `prometheus.yaml.gz` key. - // This behavior is deprecated and will be removed in the next major version + // This behavior is *deprecated* and will be removed in the next major version // of the custom resource definition. It is recommended to use // `spec.additionalScrapeConfigs` instead. + // + // Note that the ScrapeConfig custom resource definition is currently at Alpha level. + // + // +optional ScrapeConfigSelector *metav1.LabelSelector `json:"scrapeConfigSelector,omitempty"` - // Namespace's labels to match for ScrapeConfig discovery. If nil, only - // check own namespace. + // Namespaces to match for ScrapeConfig discovery. An empty label selector + // matches all namespaces. A null label selector matches the current + // namespace only. + // + // Note that the ScrapeConfig custom resource definition is currently at Alpha level. + // + // +optional ScrapeConfigNamespaceSelector *metav1.LabelSelector `json:"scrapeConfigNamespaceSelector,omitempty"` - // Version of Prometheus to be deployed. + + // Version of Prometheus being deployed. The operator uses this information + // to generate the Prometheus StatefulSet + configuration files. + // + // If not specified, the operator assumes the latest upstream version of + // Prometheus available at the time when the version of the operator was + // released. Version string `json:"version,omitempty"` + // When a Prometheus deployment is paused, no actions except for deletion // will be performed on the underlying objects. Paused bool `json:"paused,omitempty"` - // Image if specified has precedence over baseImage, tag and sha - // combinations. Specifying the version is still necessary to ensure the - // Prometheus Operator knows what version of Prometheus is being - // configured. + + // Container image name for Prometheus. If specified, it takes precedence + // over the `spec.baseImage`, `spec.tag` and `spec.sha` fields. + // + // Specifying `spec.version` is still necessary to ensure the Prometheus + // Operator knows which version of Prometheus is being configured. + // + // If neither `spec.image` nor `spec.baseImage` are defined, the operator + // will use the latest upstream version of Prometheus available at the time + // when the operator was released. + // + // +optional Image *string `json:"image,omitempty"` // Image pull policy for the 'prometheus', 'init-config-reloader' and 'config-reloader' containers. // See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy for more details. // +kubebuilder:validation:Enum="";Always;Never;IfNotPresent ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"` - // An optional list of references to secrets in the same namespace - // to use for pulling prometheus and alertmanager images from registries - // see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + // An optional list of references to Secrets in the same namespace + // to use for pulling images from registries. + // See http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + // Number of replicas of each shard to deploy for a Prometheus deployment. 
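// Illustrative sketch, not part of the patch: the reworded selector comments
// above draw a three-way distinction that is easy to miss in Go code.
package main

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleSelectors() monitoringv1.CommonPrometheusFields {
	return monitoringv1.CommonPrometheusFields{
		ServiceMonitorSelector:          &metav1.LabelSelector{}, // empty selector: select all ServiceMonitors
		PodMonitorSelector:              nil,                     // null object selector: select no PodMonitors
		ServiceMonitorNamespaceSelector: nil,                     // null namespace selector: own namespace only
	}
}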
-	// Number of replicas multiplied by shards is the total number of Pods
+	// `spec.replicas` multiplied by `spec.shards` is the total number of Pods
 	// created.
+	//
+	// Default: 1
+	// +optional
 	Replicas *int32 `json:"replicas,omitempty"`
-	// EXPERIMENTAL: Number of shards to distribute targets onto. Number of
-	// replicas multiplied by shards is the total number of Pods created. Note
-	// that scaling down shards will not reshard data onto remaining instances,
-	// it must be manually moved. Increasing shards will not reshard data
-	// either but it will continue to be available from the same instances. To
-	// query globally use Thanos sidecar and Thanos querier or remote write
-	// data to a central location. Sharding is done on the content of the
-	// `__address__` target meta-label.
+	// Number of shards to distribute targets onto. `spec.replicas`
+	// multiplied by `spec.shards` is the total number of Pods created.
+	//
+	// Note that scaling down shards will not reshard data onto remaining
+	// instances, it must be manually moved. Increasing shards will not reshard
+	// data either but it will continue to be available from the same
+	// instances. To query globally, use Thanos sidecar and Thanos querier or
+	// remote write data to a central location.
+	//
+	// Sharding is performed on the content of the `__address__` target meta-label
+	// for PodMonitors and ServiceMonitors and `__param_target__` for Probes.
+	//
+	// Default: 1
+	// +optional
 	Shards *int32 `json:"shards,omitempty"`
-	// Name of Prometheus external label used to denote replica name.
-	// Defaults to the value of `prometheus_replica`. External label will
-	// _not_ be added when value is set to empty string (`""`).
+
+	// Name of Prometheus external label used to denote the replica name.
+	// The external label will _not_ be added when the field is set to the
+	// empty string (`""`).
+	//
+	// Default: "prometheus_replica"
+	// +optional
 	ReplicaExternalLabelName *string `json:"replicaExternalLabelName,omitempty"`
-	// Name of Prometheus external label used to denote Prometheus instance
-	// name. Defaults to the value of `prometheus`. External label will
-	// _not_ be added when value is set to empty string (`""`).
+	// Name of Prometheus external label used to denote the Prometheus instance
+	// name. The external label will _not_ be added when the field is set to
+	// the empty string (`""`).
+	//
+	// Default: "prometheus"
+	// +optional
 	PrometheusExternalLabelName *string `json:"prometheusExternalLabelName,omitempty"`
-	// Log level for Prometheus to be configured with.
-	//+kubebuilder:validation:Enum="";debug;info;warn;error
+
+	// Log level for Prometheus and the config-reloader sidecar.
+	// +kubebuilder:validation:Enum="";debug;info;warn;error
 	LogLevel string `json:"logLevel,omitempty"`
-	// Log format for Prometheus to be configured with.
-	//+kubebuilder:validation:Enum="";logfmt;json
+	// Log format for Prometheus and the config-reloader sidecar.
+	// +kubebuilder:validation:Enum="";logfmt;json
 	LogFormat string `json:"logFormat,omitempty"`
-	// Interval between consecutive scrapes. Default: `30s`
+
+	// Interval between consecutive scrapes.
+	//
+	// Default: "30s"
 	// +kubebuilder:default:="30s"
 	ScrapeInterval Duration `json:"scrapeInterval,omitempty"`
-	// Number of seconds to wait for target to respond before erroring.
+	// Number of seconds to wait until a scrape request times out.
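// Illustrative sketch, not part of the patch: per the replicas/shards comments
// above, the operator creates spec.replicas * spec.shards Pods, with both
// fields defaulting to 1 when nil.
package main

import "fmt"

func totalPods(replicas, shards *int32) int32 {
	r, s := int32(1), int32(1) // documented defaults
	if replicas != nil {
		r = *replicas
	}
	if shards != nil {
		s = *shards
	}
	return r * s
}

func main() {
	replicas, shards := int32(2), int32(3)
	fmt.Println(totalPods(&replicas, &shards)) // 6
}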
ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"` + + // The protocols to negotiate during a scrape. It tells clients the + // protocols supported by Prometheus in order of preference (from most to least preferred). + // + // If unset, Prometheus uses its default value. + // + // It requires Prometheus >= v2.49.0. + // + // +listType=set + // +optional + ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"` + // The labels to add to any time series or alerts when communicating with // external systems (federation, remote storage, Alertmanager). + // Labels defined by `spec.replicaExternalLabelName` and + // `spec.prometheusExternalLabelName` take precedence over this list. ExternalLabels map[string]string `json:"externalLabels,omitempty"` - // Enable Prometheus to be used as a receiver for the Prometheus remote write protocol. Defaults to the value of `false`. + + // Enable Prometheus to be used as a receiver for the Prometheus remote + // write protocol. + // // WARNING: This is not considered an efficient way of ingesting samples. // Use it with caution for specific low-volume use cases. // It is not suitable for replacing the ingestion via scraping and turning // Prometheus into a push-based metrics collection system. // For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#remote-write-receiver - // Only valid in Prometheus versions 2.33.0 and newer. + // + // It requires Prometheus >= v2.33.0. EnableRemoteWriteReceiver bool `json:"enableRemoteWriteReceiver,omitempty"` - // Enable access to Prometheus disabled features. By default, no features are enabled. - // Enabling disabled features is entirely outside the scope of what the maintainers will - // support and by doing so, you accept that this behaviour may break at any - // time without notice. - // For more information see https://prometheus.io/docs/prometheus/latest/disabled_features/ + + // Enable access to Prometheus feature flags. By default, no features are enabled. + // + // Enabling features which are disabled by default is entirely outside the + // scope of what the maintainers will support and by doing so, you accept + // that this behaviour may break at any time without notice. + // + // For more information see https://prometheus.io/docs/prometheus/latest/feature_flags/ EnableFeatures []string `json:"enableFeatures,omitempty"` - // The external URL the Prometheus instances will be available under. This is - // necessary to generate correct URLs. This is necessary if Prometheus is not - // served from root of a DNS name. + + // The external URL under which the Prometheus service is externally + // available. This is necessary to generate correct URLs (for instance if + // Prometheus is accessible behind an Ingress resource). ExternalURL string `json:"externalUrl,omitempty"` - // The route prefix Prometheus registers HTTP handlers for. This is useful, - // if using ExternalURL and a proxy is rewriting HTTP routes of a request, - // and the actual ExternalURL is still true, but the server serves requests - // under a different route prefix. For example for use with `kubectl proxy`. + // The route prefix Prometheus registers HTTP handlers for. + // + // This is useful when using `spec.externalURL`, and a proxy is rewriting + // HTTP routes of a request, and the actual ExternalURL is still true, but + // the server serves requests under a different route prefix. For example + // for use with `kubectl proxy`. 
+
 	// The labels to add to any time series or alerts when communicating with
 	// external systems (federation, remote storage, Alertmanager).
+	// Labels defined by `spec.replicaExternalLabelName` and
+	// `spec.prometheusExternalLabelName` take precedence over this list.
 	ExternalLabels map[string]string `json:"externalLabels,omitempty"`
-	// Enable Prometheus to be used as a receiver for the Prometheus remote write protocol. Defaults to the value of `false`.
+
+	// Enable Prometheus to be used as a receiver for the Prometheus remote
+	// write protocol.
+	//
 	// WARNING: This is not considered an efficient way of ingesting samples.
 	// Use it with caution for specific low-volume use cases.
 	// It is not suitable for replacing the ingestion via scraping and turning
 	// Prometheus into a push-based metrics collection system.
 	// For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#remote-write-receiver
-	// Only valid in Prometheus versions 2.33.0 and newer.
+	//
+	// It requires Prometheus >= v2.33.0.
 	EnableRemoteWriteReceiver bool `json:"enableRemoteWriteReceiver,omitempty"`
-	// Enable access to Prometheus disabled features. By default, no features are enabled.
-	// Enabling disabled features is entirely outside the scope of what the maintainers will
-	// support and by doing so, you accept that this behaviour may break at any
-	// time without notice.
-	// For more information see https://prometheus.io/docs/prometheus/latest/disabled_features/
+
+	// Enable access to Prometheus feature flags. By default, no features are enabled.
+	//
+	// Enabling features which are disabled by default is entirely outside the
+	// scope of what the maintainers will support and by doing so, you accept
+	// that this behaviour may break at any time without notice.
+	//
+	// For more information see https://prometheus.io/docs/prometheus/latest/feature_flags/
 	EnableFeatures []string `json:"enableFeatures,omitempty"`
-	// The external URL the Prometheus instances will be available under. This is
-	// necessary to generate correct URLs. This is necessary if Prometheus is not
-	// served from root of a DNS name.
+
+	// The external URL under which the Prometheus service is externally
+	// available. This is necessary to generate correct URLs (for instance if
+	// Prometheus is accessible behind an Ingress resource).
 	ExternalURL string `json:"externalUrl,omitempty"`
-	// The route prefix Prometheus registers HTTP handlers for. This is useful,
-	// if using ExternalURL and a proxy is rewriting HTTP routes of a request,
-	// and the actual ExternalURL is still true, but the server serves requests
-	// under a different route prefix. For example for use with `kubectl proxy`.
+	// The route prefix Prometheus registers HTTP handlers for.
+	//
+	// This is useful when using `spec.externalURL`, and a proxy is rewriting
+	// HTTP routes of a request, and the actual ExternalURL is still true, but
+	// the server serves requests under a different route prefix. For example
+	// for use with `kubectl proxy`.
 	RoutePrefix string `json:"routePrefix,omitempty"`
-	// Storage spec to specify how storage shall be used.
+
+	// Storage defines the storage used by Prometheus.
 	Storage *StorageSpec `json:"storage,omitempty"`
-	// Volumes allows configuration of additional volumes on the output StatefulSet definition. Volumes specified will
-	// be appended to other volumes that are generated as a result of StorageSpec objects.
+
+	// Volumes allows the configuration of additional volumes on the output
+	// StatefulSet definition. Volumes specified will be appended to other
+	// volumes that are generated as a result of StorageSpec objects.
 	Volumes []v1.Volume `json:"volumes,omitempty"`
-	// VolumeMounts allows configuration of additional VolumeMounts on the output StatefulSet definition.
-	// VolumeMounts specified will be appended to other VolumeMounts in the prometheus container,
-	// that are generated as a result of StorageSpec objects.
+	// VolumeMounts allows the configuration of additional VolumeMounts.
+	//
+	// VolumeMounts will be appended to other VolumeMounts in the 'prometheus'
+	// container that are generated as a result of StorageSpec objects.
 	VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"`
-	// Defines the web command line flags when starting Prometheus.
+
+	// The field controls if and how PVCs are deleted during the lifecycle of a StatefulSet.
+	// The default behavior is that all PVCs are retained.
+	// This is an alpha field from Kubernetes 1.23 until 1.26 and a beta field from 1.26.
+	// It requires enabling the StatefulSetAutoDeletePVC feature gate.
+	//
+	// +optional
+	PersistentVolumeClaimRetentionPolicy *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty"`
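The new `persistentVolumeClaimRetentionPolicy` field reuses the upstream `appsv1` type directly, so a short sketch of constructing it is enough to show the shape; the Delete/Retain combination below is only an example, not a recommended default.

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	// Example policy: delete PVCs when the StatefulSet is deleted,
	// keep them when it is merely scaled down.
	policy := appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{
		WhenDeleted: appsv1.DeletePersistentVolumeClaimRetentionPolicyType,
		WhenScaled:  appsv1.RetainPersistentVolumeClaimRetentionPolicyType,
	}
	fmt.Printf("whenDeleted=%s whenScaled=%s\n", policy.WhenDeleted, policy.WhenScaled)
}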
+
+	// Defines the configuration of the Prometheus web server.
 	Web *PrometheusWebSpec `json:"web,omitempty"`
-	// Define resources requests and limits for single Pods.
+
+	// Defines the resources requests and limits of the 'prometheus' container.
 	Resources v1.ResourceRequirements `json:"resources,omitempty"`
-	// Define which Nodes the Pods are scheduled on.
+
+	// Defines on which Nodes the Pods are scheduled.
 	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
 	// ServiceAccountName is the name of the ServiceAccount to use to run the
 	// Prometheus Pods.
 	ServiceAccountName string `json:"serviceAccountName,omitempty"`
+
 	// Secrets is a list of Secrets in the same namespace as the Prometheus
 	// object, which shall be mounted into the Prometheus Pods.
 	// Each Secret is added to the StatefulSet definition as a volume named `secret-<secret-name>`.
@@ -214,41 +360,66 @@ type CommonPrometheusFields struct {
 	// Each ConfigMap is added to the StatefulSet definition as a volume named `configmap-<configmap-name>`.
 	// The ConfigMaps are mounted into /etc/prometheus/configmaps/<configmap-name> in the 'prometheus' container.
 	ConfigMaps []string `json:"configMaps,omitempty"`
-	// If specified, the pod's scheduling constraints.
+
+	// Defines the Pods' affinity scheduling rules if specified.
+	// +optional
 	Affinity *v1.Affinity `json:"affinity,omitempty"`
-	// If specified, the pod's tolerations.
+	// Defines the Pods' tolerations if specified.
+	// +optional
 	Tolerations []v1.Toleration `json:"tolerations,omitempty"`
-	// If specified, the pod's topology spread constraints.
-	TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
-	// remoteWrite is the list of remote write configurations.
+
+	// Defines the pod's topology spread constraints if specified.
+	// +optional
+	TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
+
+	// Defines the list of remote write configurations.
+	// +optional
 	RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"`
+
 	// SecurityContext holds pod-level security attributes and common container settings.
 	// This defaults to the default PodSecurityContext.
+	// +optional
 	SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"`
-	// ListenLocal makes the Prometheus server listen on loopback, so that it
-	// does not bind against the Pod IP.
+
+	// When true, the Prometheus server listens on the loopback address
+	// instead of the Pod IP's address.
 	ListenLocal bool `json:"listenLocal,omitempty"`
+
 	// Containers allows injecting additional containers or modifying operator
 	// generated containers. This can be used to allow adding an authentication
-	// proxy to a Prometheus pod or to change the behavior of an operator
-	// generated container. Containers described here modify an operator
-	// generated container if they share the same name and modifications are
-	// done via a strategic merge patch. The current container names are:
-	// `prometheus`, `config-reloader`, and `thanos-sidecar`. Overriding
-	// containers is entirely outside the scope of what the maintainers will
-	// support and by doing so, you accept that this behaviour may break at any
-	// time without notice.
+	// proxy to the Pods or to change the behavior of an operator generated
+	// container. Containers described here modify an operator generated
+	// container if they share the same name and modifications are done via a
+	// strategic merge patch.
+	//
+	// The names of containers managed by the operator are:
+	// * `prometheus`
+	// * `config-reloader`
+	// * `thanos-sidecar`
+	//
+	// Overriding containers is entirely outside the scope of what the
+	// maintainers will support and by doing so, you accept that this behaviour
+	// may break at any time without notice.
+	// +optional
 	Containers []v1.Container `json:"containers,omitempty"`
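To illustrate the strategic-merge behavior the comment describes: an entry that reuses an operator-managed container name is merged into the generated definition rather than added as a second container. A hedged sketch using the core/v1 types; the "4Gi" memory limit is an arbitrary example value.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Reusing the name "prometheus" patches the operator-generated
	// container; an unknown name would add a sidecar instead.
	override := corev1.Container{
		Name: "prometheus",
		Resources: corev1.ResourceRequirements{
			Limits: corev1.ResourceList{
				corev1.ResourceMemory: resource.MustParse("4Gi"),
			},
		},
	}
	fmt.Printf("override for container %q: %s memory limit\n",
		override.Name, override.Resources.Limits.Memory())
}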
-	// InitContainers allows adding initContainers to the pod definition. Those can be used to e.g.
-	// fetch secrets for injection into the Prometheus configuration from external sources. Any errors
-	// during the execution of an initContainer will lead to a restart of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
-	// InitContainers described here modify an operator
-	// generated init containers if they share the same name and modifications are
-	// done via a strategic merge patch. The current init container name is:
-	// `init-config-reloader`. Overriding init containers is entirely outside the
-	// scope of what the maintainers will support and by doing so, you accept that
-	// this behaviour may break at any time without notice.
+	// InitContainers allows injecting initContainers to the Pod definition. Those
+	// can be used to e.g. fetch secrets for injection into the Prometheus
+	// configuration from external sources. Any errors during the execution of
+	// an initContainer will lead to a restart of the Pod. More info:
+	// https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+	// InitContainers described here modify an operator generated init
+	// container if they share the same name and modifications are done via a
+	// strategic merge patch.
+	//
+	// The names of init containers managed by the operator are:
+	// * `init-config-reloader`
+	//
+	// Overriding init containers is entirely outside the scope of what the
+	// maintainers will support and by doing so, you accept that this behaviour
+	// may break at any time without notice.
+	// +optional
 	InitContainers []v1.Container `json:"initContainers,omitempty"`
+
 	// AdditionalScrapeConfigs allows specifying a key of a Secret containing
 	// additional Prometheus scrape configurations. Scrape configurations
 	// specified are appended to the configurations generated by the Prometheus
@@ -260,22 +431,36 @@ type CommonPrometheusFields struct {
 	// break upgrades of Prometheus. It is advised to review Prometheus release
 	// notes to ensure that no incompatible scrape configs are going to break
 	// Prometheus after the upgrade.
+	// +optional
 	AdditionalScrapeConfigs *v1.SecretKeySelector `json:"additionalScrapeConfigs,omitempty"`
-	// APIServerConfig allows specifying a host and auth methods to access apiserver.
-	// If left empty, Prometheus is assumed to run inside of the cluster
-	// and will discover API servers automatically and use the pod's CA certificate
+
+	// APIServerConfig allows specifying a host and auth methods to access the
+	// Kubernetes API server.
+	// If null, Prometheus is assumed to run inside of the cluster: it will
+	// discover the API servers automatically and use the Pod's CA certificate
 	// and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/.
+	// +optional
 	APIServerConfig *APIServerConfig `json:"apiserverConfig,omitempty"`
-	// Priority class assigned to the Pods
+
+	// Priority class assigned to the Pods.
 	PriorityClassName string `json:"priorityClassName,omitempty"`
 	// Port name used for the pods and governing service.
-	// Defaults to `web`.
+	// Default: "web"
 	// +kubebuilder:default:="web"
 	PortName string `json:"portName,omitempty"`
-	// ArbitraryFSAccessThroughSMs configures whether configuration
-	// based on a service monitor can access arbitrary files on the file system
-	// of the Prometheus container e.g. bearer token files.
+
+	// When true, ServiceMonitor, PodMonitor and Probe objects are forbidden to
+	// reference arbitrary files on the file system of the 'prometheus'
+	// container.
+	// When a ServiceMonitor's endpoint specifies a `bearerTokenFile` value
+	// (e.g. '/var/run/secrets/kubernetes.io/serviceaccount/token'), a
+	// malicious target can get access to the Prometheus service account's
+	// token in the Prometheus' scrape request. Setting
+	// `spec.arbitraryFSAccessThroughSM` to 'true' would prevent the attack.
+	// Users should instead provide the credentials using the
+	// `spec.bearerTokenSecret` field.
 	ArbitraryFSAccessThroughSMs ArbitraryFSAccessThroughSMsConfig `json:"arbitraryFSAccessThroughSMs,omitempty"`
+
 	// When true, Prometheus resolves label conflicts by renaming the labels in
 	// the scraped data to "exported_