From ccf9cc06474849d438269b9a4e762499296dd471 Mon Sep 17 00:00:00 2001 From: mykolalosev Date: Fri, 20 Jan 2023 13:37:27 +0200 Subject: [PATCH] issue-131 --- .../v1beta1/zz_autoscalingpolicy_types.go | 236 + apis/dataproc/v1beta1/zz_cluster_types.go | 956 +++ .../dataproc/v1beta1/zz_generated.deepcopy.go | 7112 +++++++++++++++++ apis/dataproc/v1beta1/zz_generated.managed.go | 284 + .../v1beta1/zz_generated.managedlist.go | 56 + .../v1beta1/zz_generated.resolvers.go | 102 + .../v1beta1/zz_generated_terraformed.go | 322 + apis/dataproc/v1beta1/zz_groupversion_info.go | 44 + apis/dataproc/v1beta1/zz_job_types.go | 525 ++ .../v1beta1/zz_workflowtemplate_types.go | 1286 +++ apis/zz_register.go | 2 + config/externalname.go | 11 + config/externalnamenottested.go | 8 - .../dataproc/autoscalingpolicy.yaml | 39 + examples-generated/dataproc/cluster.yaml | 15 + examples-generated/dataproc/job.yaml | 47 + .../dataproc/workflowtemplate.yaml | 48 + examples/dataproc/autoscalingpolicy.yaml | 18 + examples/dataproc/cluster.yaml | 16 + examples/dataproc/job.yaml | 48 + examples/dataproc/workflowtemplate.yaml | 45 + .../autoscalingpolicy/zz_controller.go | 64 + .../dataproc/cluster/zz_controller.go | 63 + .../controller/dataproc/job/zz_controller.go | 63 + .../workflowtemplate/zz_controller.go | 64 + internal/controller/zz_setup.go | 8 + ...oc.gcp.upbound.io_autoscalingpolicies.yaml | 452 ++ .../dataproc.gcp.upbound.io_clusters.yaml | 1249 +++ .../crds/dataproc.gcp.upbound.io_jobs.yaml | 936 +++ ...proc.gcp.upbound.io_workflowtemplates.yaml | 1976 +++++ 30 files changed, 16087 insertions(+), 8 deletions(-) create mode 100755 apis/dataproc/v1beta1/zz_autoscalingpolicy_types.go create mode 100755 apis/dataproc/v1beta1/zz_cluster_types.go create mode 100644 apis/dataproc/v1beta1/zz_generated.deepcopy.go create mode 100644 apis/dataproc/v1beta1/zz_generated.managed.go create mode 100644 apis/dataproc/v1beta1/zz_generated.managedlist.go create mode 100644 apis/dataproc/v1beta1/zz_generated.resolvers.go create mode 100755 apis/dataproc/v1beta1/zz_generated_terraformed.go create mode 100755 apis/dataproc/v1beta1/zz_groupversion_info.go create mode 100755 apis/dataproc/v1beta1/zz_job_types.go create mode 100755 apis/dataproc/v1beta1/zz_workflowtemplate_types.go create mode 100644 examples-generated/dataproc/autoscalingpolicy.yaml create mode 100644 examples-generated/dataproc/cluster.yaml create mode 100644 examples-generated/dataproc/job.yaml create mode 100644 examples-generated/dataproc/workflowtemplate.yaml create mode 100644 examples/dataproc/autoscalingpolicy.yaml create mode 100644 examples/dataproc/cluster.yaml create mode 100644 examples/dataproc/job.yaml create mode 100644 examples/dataproc/workflowtemplate.yaml create mode 100755 internal/controller/dataproc/autoscalingpolicy/zz_controller.go create mode 100755 internal/controller/dataproc/cluster/zz_controller.go create mode 100755 internal/controller/dataproc/job/zz_controller.go create mode 100755 internal/controller/dataproc/workflowtemplate/zz_controller.go create mode 100644 package/crds/dataproc.gcp.upbound.io_autoscalingpolicies.yaml create mode 100644 package/crds/dataproc.gcp.upbound.io_clusters.yaml create mode 100644 package/crds/dataproc.gcp.upbound.io_jobs.yaml create mode 100644 package/crds/dataproc.gcp.upbound.io_workflowtemplates.yaml diff --git a/apis/dataproc/v1beta1/zz_autoscalingpolicy_types.go b/apis/dataproc/v1beta1/zz_autoscalingpolicy_types.go new file mode 100755 index 000000000..253a075f6 --- /dev/null +++ 
b/apis/dataproc/v1beta1/zz_autoscalingpolicy_types.go @@ -0,0 +1,236 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AutoscalingPolicyObservation struct { + + // an identifier for the resource with format projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}} + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // The "resource name" of the autoscaling policy. + Name *string `json:"name,omitempty" tf:"name,omitempty"` +} + +type AutoscalingPolicyParameters struct { + + // Basic algorithm for autoscaling. + // Structure is documented below. + // +kubebuilder:validation:Optional + BasicAlgorithm []BasicAlgorithmParameters `json:"basicAlgorithm,omitempty" tf:"basic_algorithm,omitempty"` + + // The location where the autoscaling policy should reside. + // The default value is global. + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. + // +kubebuilder:validation:Optional + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // Describes how the autoscaler will operate for secondary workers. + // Structure is documented below. + // +kubebuilder:validation:Optional + SecondaryWorkerConfig []SecondaryWorkerConfigParameters `json:"secondaryWorkerConfig,omitempty" tf:"secondary_worker_config,omitempty"` + + // Describes how the autoscaler will operate for primary workers. + // Structure is documented below. + // +kubebuilder:validation:Optional + WorkerConfig []WorkerConfigParameters `json:"workerConfig,omitempty" tf:"worker_config,omitempty"` +} + +type BasicAlgorithmObservation struct { +} + +type BasicAlgorithmParameters struct { + + // Duration between scaling events. A scaling period starts after the + // update operation from the previous event has completed. + // Bounds: [2m, 1d]. Default: 2m. + // +kubebuilder:validation:Optional + CooldownPeriod *string `json:"cooldownPeriod,omitempty" tf:"cooldown_period,omitempty"` + + // YARN autoscaling configuration. + // Structure is documented below. + // +kubebuilder:validation:Required + YarnConfig []YarnConfigParameters `json:"yarnConfig" tf:"yarn_config,omitempty"` +} + +type SecondaryWorkerConfigObservation struct { +} + +type SecondaryWorkerConfigParameters struct { + + // Maximum number of instances for this group. Note that by default, clusters will not use + // secondary workers. Required for secondary workers if the minimum secondary instances is set. + // Bounds: [minInstances, ). Defaults to 0. 
+ // +kubebuilder:validation:Optional + MaxInstances *float64 `json:"maxInstances,omitempty" tf:"max_instances,omitempty"` + + // Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0. + // +kubebuilder:validation:Optional + MinInstances *float64 `json:"minInstances,omitempty" tf:"min_instances,omitempty"` + + // Weight for the instance group, which is used to determine the fraction of total workers + // in the cluster from this instance group. For example, if primary workers have weight 2, + // and secondary workers have weight 1, the cluster will have approximately 2 primary workers + // for each secondary worker. + // The cluster may not reach the specified balance if constrained by min/max bounds or other + // autoscaling settings. For example, if maxInstances for secondary workers is 0, then only + // primary workers will be added. The cluster can also be out of balance when created. + // If weight is not set on any instance group, the cluster will default to equal weight for + // all groups: the cluster will attempt to maintain an equal number of workers in each group + // within the configured size bounds for each group. If weight is set for one group only, + // the cluster will default to zero weight on the unset group. For example if weight is set + // only on primary workers, the cluster will use primary workers only and no secondary workers. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type WorkerConfigObservation struct { +} + +type WorkerConfigParameters struct { + + // Maximum number of instances for this group. + // +kubebuilder:validation:Required + MaxInstances *float64 `json:"maxInstances" tf:"max_instances,omitempty"` + + // Minimum number of instances for this group. Bounds: [2, maxInstances]. Defaults to 2. + // +kubebuilder:validation:Optional + MinInstances *float64 `json:"minInstances,omitempty" tf:"min_instances,omitempty"` + + // Weight for the instance group, which is used to determine the fraction of total workers + // in the cluster from this instance group. For example, if primary workers have weight 2, + // and secondary workers have weight 1, the cluster will have approximately 2 primary workers + // for each secondary worker. + // The cluster may not reach the specified balance if constrained by min/max bounds or other + // autoscaling settings. For example, if maxInstances for secondary workers is 0, then only + // primary workers will be added. The cluster can also be out of balance when created. + // If weight is not set on any instance group, the cluster will default to equal weight for + // all groups: the cluster will attempt to maintain an equal number of workers in each group + // within the configured size bounds for each group. If weight is set for one group only, + // the cluster will default to zero weight on the unset group. For example if weight is set + // only on primary workers, the cluster will use primary workers only and no secondary workers. + // +kubebuilder:validation:Optional + Weight *float64 `json:"weight,omitempty" tf:"weight,omitempty"` +} + +type YarnConfigObservation struct { +} + +type YarnConfigParameters struct { + + // Timeout for YARN graceful decommissioning of Node Managers. Specifies the + // duration to wait for jobs to complete before forcefully removing workers + // (and potentially interrupting jobs). Only applicable to downscaling operations. + // Bounds: [0s, 1d]. 
+ // +kubebuilder:validation:Required + GracefulDecommissionTimeout *string `json:"gracefulDecommissionTimeout" tf:"graceful_decommission_timeout,omitempty"` + + // Fraction of average pending memory in the last cooldown period for which to + // remove workers. A scale-down factor of 1 will result in scaling down so that there + // is no available memory remaining after the update (more aggressive scaling). + // A scale-down factor of 0 disables removing workers, which can be beneficial for + // autoscaling a single job. + // Bounds: [0.0, 1.0]. + // +kubebuilder:validation:Required + ScaleDownFactor *float64 `json:"scaleDownFactor" tf:"scale_down_factor,omitempty"` + + // Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. + // For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must + // recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 + // means the autoscaler will scale down on any recommended change. + // Bounds: [0.0, 1.0]. Default: 0.0. + // +kubebuilder:validation:Optional + ScaleDownMinWorkerFraction *float64 `json:"scaleDownMinWorkerFraction,omitempty" tf:"scale_down_min_worker_fraction,omitempty"` + + // Fraction of average pending memory in the last cooldown period for which to + // add workers. A scale-up factor of 1.0 will result in scaling up so that there + // is no pending memory remaining after the update (more aggressive scaling). + // A scale-up factor closer to 0 will result in a smaller magnitude of scaling up + // (less aggressive scaling). + // Bounds: [0.0, 1.0]. + // +kubebuilder:validation:Required + ScaleUpFactor *float64 `json:"scaleUpFactor" tf:"scale_up_factor,omitempty"` + + // Minimum scale-up threshold as a fraction of total cluster size before scaling + // occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler + // must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of + // 0 means the autoscaler will scale up on any recommended change. + // Bounds: [0.0, 1.0]. Default: 0.0. + // +kubebuilder:validation:Optional + ScaleUpMinWorkerFraction *float64 `json:"scaleUpMinWorkerFraction,omitempty" tf:"scale_up_min_worker_fraction,omitempty"` +} + +// AutoscalingPolicySpec defines the desired state of AutoscalingPolicy +type AutoscalingPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider AutoscalingPolicyParameters `json:"forProvider"` +} + +// AutoscalingPolicyStatus defines the observed state of AutoscalingPolicy. +type AutoscalingPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider AutoscalingPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// AutoscalingPolicy is the Schema for the AutoscalingPolicys API. Describes an autoscaling policy for Dataproc cluster autoscaler. 
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,gcp} +type AutoscalingPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec AutoscalingPolicySpec `json:"spec"` + Status AutoscalingPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AutoscalingPolicyList contains a list of AutoscalingPolicys +type AutoscalingPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AutoscalingPolicy `json:"items"` +} + +// Repository type metadata. +var ( + AutoscalingPolicy_Kind = "AutoscalingPolicy" + AutoscalingPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: AutoscalingPolicy_Kind}.String() + AutoscalingPolicy_KindAPIVersion = AutoscalingPolicy_Kind + "." + CRDGroupVersion.String() + AutoscalingPolicy_GroupVersionKind = CRDGroupVersion.WithKind(AutoscalingPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&AutoscalingPolicy{}, &AutoscalingPolicyList{}) +} diff --git a/apis/dataproc/v1beta1/zz_cluster_types.go b/apis/dataproc/v1beta1/zz_cluster_types.go new file mode 100755 index 000000000..59897c580 --- /dev/null +++ b/apis/dataproc/v1beta1/zz_cluster_types.go @@ -0,0 +1,956 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type AcceleratorsObservation struct { +} + +type AcceleratorsParameters struct { + + // The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8. + // +kubebuilder:validation:Required + AcceleratorCount *float64 `json:"acceleratorCount" tf:"accelerator_count,omitempty"` + + // The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80. + // +kubebuilder:validation:Required + AcceleratorType *string `json:"acceleratorType" tf:"accelerator_type,omitempty"` +} + +type AutoscalingConfigObservation struct { +} + +type AutoscalingConfigParameters struct { + + // The autoscaling policy used by the cluster. + // +kubebuilder:validation:Required + PolicyURI *string `json:"policyUri" tf:"policy_uri,omitempty"` +} + +type AutoscalingObservation struct { +} + +type AutoscalingParameters struct { + + // The maximum number of nodes in the node pool. Must be >= minNodeCount, and must be > 0. 
+ // +kubebuilder:validation:Optional + MaxNodeCount *float64 `json:"maxNodeCount,omitempty" tf:"max_node_count,omitempty"` + + // The minimum number of nodes in the node pool. Must be >= 0 and <= maxNodeCount. + // +kubebuilder:validation:Optional + MinNodeCount *float64 `json:"minNodeCount,omitempty" tf:"min_node_count,omitempty"` +} + +type AuxiliaryServicesConfigMetastoreConfigObservation struct { +} + +type AuxiliaryServicesConfigMetastoreConfigParameters struct { + + // Resource name of an existing Dataproc Metastore service. + // +kubebuilder:validation:Optional + DataprocMetastoreService *string `json:"dataprocMetastoreService,omitempty" tf:"dataproc_metastore_service,omitempty"` +} + +type AuxiliaryServicesConfigObservation struct { +} + +type AuxiliaryServicesConfigParameters struct { + + // The config setting for metastore service with the cluster. + // Structure defined below. + // +kubebuilder:validation:Optional + MetastoreConfig []AuxiliaryServicesConfigMetastoreConfigParameters `json:"metastoreConfig,omitempty" tf:"metastore_config,omitempty"` + + // The Spark History Server configuration for the workload. + // +kubebuilder:validation:Optional + SparkHistoryServerConfig []SparkHistoryServerConfigParameters `json:"sparkHistoryServerConfig,omitempty" tf:"spark_history_server_config,omitempty"` +} + +type ClusterConfigObservation struct { + + // The name of the cloud storage bucket ultimately used to house the staging data + // for the cluster. If staging_bucket is specified, it will contain this value, otherwise + // it will be the auto generated name. + Bucket *string `json:"bucket,omitempty" tf:"bucket,omitempty"` + + // The config settings for port access on the cluster. + // Structure defined below. + // +kubebuilder:validation:Optional + EndpointConfig []EndpointConfigObservation `json:"endpointConfig,omitempty" tf:"endpoint_config,omitempty"` + + // The settings for auto deletion cluster schedule. + // Structure defined below. + // +kubebuilder:validation:Optional + LifecycleConfig []LifecycleConfigObservation `json:"lifecycleConfig,omitempty" tf:"lifecycle_config,omitempty"` + + // The Google Compute Engine config settings for the master instances + // in a cluster. Structure defined below. + // +kubebuilder:validation:Optional + MasterConfig []MasterConfigObservation `json:"masterConfig,omitempty" tf:"master_config,omitempty"` + + // The Google Compute Engine config settings for the additional + // instances in a cluster. Structure defined below. + // +kubebuilder:validation:Optional + PreemptibleWorkerConfig []PreemptibleWorkerConfigObservation `json:"preemptibleWorkerConfig,omitempty" tf:"preemptible_worker_config,omitempty"` + + // The config settings for software inside the cluster. + // Structure defined below. + // +kubebuilder:validation:Optional + SoftwareConfig []SoftwareConfigObservation `json:"softwareConfig,omitempty" tf:"software_config,omitempty"` + + // The Google Compute Engine config settings for the worker instances + // in a cluster. Structure defined below. + // +kubebuilder:validation:Optional + WorkerConfig []ClusterConfigWorkerConfigObservation `json:"workerConfig,omitempty" tf:"worker_config,omitempty"` +} + +type ClusterConfigParameters struct { + + // The autoscaling policy config associated with the cluster. + // Note that once set, if autoscaling_config is the only field set in cluster_config, it can + // only be removed by setting policy_uri = "", rather than removing the whole block. + // Structure defined below. 
+ // +kubebuilder:validation:Optional + AutoscalingConfig []AutoscalingConfigParameters `json:"autoscalingConfig,omitempty" tf:"autoscaling_config,omitempty"` + + // The Customer managed encryption keys settings for the cluster. + // Structure defined below. + // +kubebuilder:validation:Optional + EncryptionConfig []EncryptionConfigParameters `json:"encryptionConfig,omitempty" tf:"encryption_config,omitempty"` + + // The config settings for port access on the cluster. + // Structure defined below. + // +kubebuilder:validation:Optional + EndpointConfig []EndpointConfigParameters `json:"endpointConfig,omitempty" tf:"endpoint_config,omitempty"` + + // Common config settings for resources of Google Compute Engine cluster + // instances, applicable to all instances in the cluster. Structure defined below. + // +kubebuilder:validation:Optional + GceClusterConfig []GceClusterConfigParameters `json:"gceClusterConfig,omitempty" tf:"gce_cluster_config,omitempty"` + + // Commands to execute on each node after config is completed. + // You can specify multiple versions of these. Structure defined below. + // +kubebuilder:validation:Optional + InitializationAction []InitializationActionParameters `json:"initializationAction,omitempty" tf:"initialization_action,omitempty"` + + // The settings for auto deletion cluster schedule. + // Structure defined below. + // +kubebuilder:validation:Optional + LifecycleConfig []LifecycleConfigParameters `json:"lifecycleConfig,omitempty" tf:"lifecycle_config,omitempty"` + + // The Google Compute Engine config settings for the master instances + // in a cluster. Structure defined below. + // +kubebuilder:validation:Optional + MasterConfig []MasterConfigParameters `json:"masterConfig,omitempty" tf:"master_config,omitempty"` + + // The config setting for metastore service with the cluster. + // Structure defined below. + // +kubebuilder:validation:Optional + MetastoreConfig []MetastoreConfigParameters `json:"metastoreConfig,omitempty" tf:"metastore_config,omitempty"` + + // The Google Compute Engine config settings for the additional + // instances in a cluster. Structure defined below. + // +kubebuilder:validation:Optional + PreemptibleWorkerConfig []PreemptibleWorkerConfigParameters `json:"preemptibleWorkerConfig,omitempty" tf:"preemptible_worker_config,omitempty"` + + // Security related configuration. Structure defined below. + // +kubebuilder:validation:Optional + SecurityConfig []SecurityConfigParameters `json:"securityConfig,omitempty" tf:"security_config,omitempty"` + + // The config settings for software inside the cluster. + // Structure defined below. + // +kubebuilder:validation:Optional + SoftwareConfig []SoftwareConfigParameters `json:"softwareConfig,omitempty" tf:"software_config,omitempty"` + + // The Cloud Storage staging bucket used to stage files, + // such as Hadoop jars, between client machines and the cluster. + // Note: If you don't explicitly specify a staging_bucket + // then GCP will auto create / assign one for you. However, you are not guaranteed + // an auto generated bucket which is solely dedicated to your cluster; it may be shared + // with other clusters in the same region/zone also choosing to use the auto generation + // option. + // +kubebuilder:validation:Optional + StagingBucket *string `json:"stagingBucket,omitempty" tf:"staging_bucket,omitempty"` + + // The Cloud Storage temp bucket used to store ephemeral cluster + // and jobs data, such as Spark and MapReduce history files. 
+ // Note: If you don't explicitly specify a temp_bucket then GCP will auto create / assign one for you. + // +kubebuilder:validation:Optional + TempBucket *string `json:"tempBucket,omitempty" tf:"temp_bucket,omitempty"` + + // The Google Compute Engine config settings for the worker instances + // in a cluster. Structure defined below. + // +kubebuilder:validation:Optional + WorkerConfig []ClusterConfigWorkerConfigParameters `json:"workerConfig,omitempty" tf:"worker_config,omitempty"` +} + +type ClusterConfigWorkerConfigObservation struct { + + // List of worker instance names which have been assigned + // to the cluster. + InstanceNames []*string `json:"instanceNames,omitempty" tf:"instance_names,omitempty"` +} + +type ClusterConfigWorkerConfigParameters struct { + + // The Compute Engine accelerator configuration for these instances. Can be specified multiple times. + // +kubebuilder:validation:Optional + Accelerators []WorkerConfigAcceleratorsParameters `json:"accelerators,omitempty" tf:"accelerators,omitempty"` + + // Disk Config + // +kubebuilder:validation:Optional + DiskConfig []WorkerConfigDiskConfigParameters `json:"diskConfig,omitempty" tf:"disk_config,omitempty"` + + // The URI for the image to use for this worker. See the guide + // for more information. + // +kubebuilder:validation:Optional + ImageURI *string `json:"imageUri,omitempty" tf:"image_uri,omitempty"` + + // The name of a Google Compute Engine machine type + // to create for the worker nodes. If not specified, GCP will default to a predetermined + // computed value (currently n1-standard-4). + // +kubebuilder:validation:Optional + MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` + + // The name of a minimum generation of CPU family + // for the master. If not specified, GCP will default to a predetermined computed value + // for each zone. See the guide + // for details about which CPU families are available (and defaulted) for each zone. + // +kubebuilder:validation:Optional + MinCPUPlatform *string `json:"minCpuPlatform,omitempty" tf:"min_cpu_platform,omitempty"` + + // Specifies the number of worker nodes to create. + // If not specified, GCP will default to a predetermined computed value (currently 2). + // There is currently a beta feature which allows you to run a + // Single Node Cluster. + // In order to take advantage of this you need to set + // "dataproc:dataproc.allow.zero.workers" = "true" in + // cluster_config.software_config.properties + // +kubebuilder:validation:Optional + NumInstances *float64 `json:"numInstances,omitempty" tf:"num_instances,omitempty"` +} + +type ClusterObservation struct { + + // Allows you to configure various aspects of the cluster. + // Structure defined below. + // +kubebuilder:validation:Optional + ClusterConfig []ClusterConfigObservation `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` +} + +type ClusterParameters struct { + + // Allows you to configure various aspects of the cluster. + // Structure defined below. + // +kubebuilder:validation:Optional + ClusterConfig []ClusterConfigParameters `json:"clusterConfig,omitempty" tf:"cluster_config,omitempty"` + + // Does not affect auto scaling decomissioning from an autoscaling policy. + // Graceful decommissioning allows removing nodes from the cluster without interrupting jobs in progress. + // Timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and potentially interrupting jobs). 
+ // Default timeout is 0 (for forceful decommission), and the maximum allowed timeout is 1 day. (see JSON representation of + // Duration). + // Only supported on Dataproc image versions 1.2 and higher. + // For more context see the docs + // +kubebuilder:validation:Optional + GracefulDecommissionTimeout *string `json:"gracefulDecommissionTimeout,omitempty" tf:"graceful_decommission_timeout,omitempty"` + + // The list of labels (key/value pairs) to be applied to + // instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name + // which is the name of the cluster. + // +kubebuilder:validation:Optional + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // The name of the cluster, unique within the project and + // zone. + // +kubebuilder:validation:Required + Name *string `json:"name" tf:"name,omitempty"` + + // The ID of the project in which the cluster will exist. If it + // is not provided, the provider project is used. + // +kubebuilder:validation:Optional + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // The region in which the cluster and associated nodes will be created in. + // Defaults to global. + // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // Allows you to configure a virtual Dataproc on GKE cluster. + // Structure defined below. + // +kubebuilder:validation:Optional + VirtualClusterConfig []VirtualClusterConfigParameters `json:"virtualClusterConfig,omitempty" tf:"virtual_cluster_config,omitempty"` +} + +type ConfigObservation struct { +} + +type ConfigParameters struct { + + // The number of local SSD disks to attach to the node, + // which is limited by the maximum number of disks allowable per zone. + // +kubebuilder:validation:Optional + LocalSsdCount *float64 `json:"localSsdCount,omitempty" tf:"local_ssd_count,omitempty"` + + // The name of a Compute Engine machine type. + // +kubebuilder:validation:Optional + MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` + + // Minimum CPU platform to be used by this instance. + // The instance may be scheduled on the specified or a newer CPU platform. + // Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge". + // +kubebuilder:validation:Optional + MinCPUPlatform *string `json:"minCpuPlatform,omitempty" tf:"min_cpu_platform,omitempty"` + + // Whether the nodes are created as preemptible VM instances. + // Preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the + // CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role). + // +kubebuilder:validation:Optional + Preemptible *bool `json:"preemptible,omitempty" tf:"preemptible,omitempty"` + + // Spot flag for enabling Spot VM, which is a rebrand of the existing preemptible flag. + // +kubebuilder:validation:Optional + Spot *bool `json:"spot,omitempty" tf:"spot,omitempty"` +} + +type DiskConfigObservation struct { +} + +type DiskConfigParameters struct { + + // Size of the primary disk attached to each node, specified + // in GB. The primary disk contains the boot volume and system libraries, and the + // smallest allowed disk size is 10GB. GCP will default to a predetermined + // computed value if not set (currently 500GB). Note: If SSDs are not + // attached, it also contains the HDFS data blocks and Hadoop working directories. 
+ // +kubebuilder:validation:Optional + BootDiskSizeGb *float64 `json:"bootDiskSizeGb,omitempty" tf:"boot_disk_size_gb,omitempty"` + + // The disk type of the primary disk attached to each node. + // One of "pd-ssd" or "pd-standard". Defaults to "pd-standard". + // +kubebuilder:validation:Optional + BootDiskType *string `json:"bootDiskType,omitempty" tf:"boot_disk_type,omitempty"` + + // The amount of local SSD disks that will be + // attached to each master cluster node. Defaults to 0. + // +kubebuilder:validation:Optional + NumLocalSsds *float64 `json:"numLocalSsds,omitempty" tf:"num_local_ssds,omitempty"` +} + +type EncryptionConfigObservation struct { +} + +type EncryptionConfigParameters struct { + + // The Cloud KMS key name to use for PD disk encryption for + // all instances in the cluster. + // +kubebuilder:validation:Required + KMSKeyName *string `json:"kmsKeyName" tf:"kms_key_name,omitempty"` +} + +type EndpointConfigObservation struct { + + // The map of port descriptions to URLs. Will only be populated if + // enable_http_port_access is true. + HTTPPorts map[string]*string `json:"httpPorts,omitempty" tf:"http_ports,omitempty"` +} + +type EndpointConfigParameters struct { + + // The flag to enable http access to specific ports + // on the cluster from external sources (aka Component Gateway). Defaults to false. + // +kubebuilder:validation:Required + EnableHTTPPortAccess *bool `json:"enableHttpPortAccess" tf:"enable_http_port_access,omitempty"` +} + +type GceClusterConfigObservation struct { +} + +type GceClusterConfigParameters struct { + + // By default, clusters are not restricted to internal IP addresses, + // and will have ephemeral external IP addresses assigned to each instance. If set to true, all + // instances in the cluster will only have internal IP addresses. Note: Private Google Access + // (also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster + // will be launched in. + // +kubebuilder:validation:Optional + InternalIPOnly *bool `json:"internalIpOnly,omitempty" tf:"internal_ip_only,omitempty"` + + // A map of the Compute Engine metadata entries to add to all instances + // (see Project and instance metadata). + // +kubebuilder:validation:Optional + Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // The name or self_link of the Google Compute Engine + // network to the cluster will be part of. Conflicts with subnetwork. + // If neither is specified, this defaults to the "default" network. + // +kubebuilder:validation:Optional + Network *string `json:"network,omitempty" tf:"network,omitempty"` + + // The service account to be used by the Node VMs. + // If not specified, the "default" service account is used. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/cloudplatform/v1beta1.ServiceAccount + // +crossplane:generate:reference:extractor=github.com/upbound/upjet/pkg/resource.ExtractParamPath("email",true) + // +kubebuilder:validation:Optional + ServiceAccount *string `json:"serviceAccount,omitempty" tf:"service_account,omitempty"` + + // Reference to a ServiceAccount in cloudplatform to populate serviceAccount. + // +kubebuilder:validation:Optional + ServiceAccountRef *v1.Reference `json:"serviceAccountRef,omitempty" tf:"-"` + + // The set of Google API scopes + // to be made available on all of the node VMs under the service_account + // specified. Both OAuth2 URLs and gcloud + // short names are supported. 
To allow full access to all Cloud APIs, use the + // cloud-platform scope. See a complete list of scopes here. + // +kubebuilder:validation:Optional + ServiceAccountScopes []*string `json:"serviceAccountScopes,omitempty" tf:"service_account_scopes,omitempty"` + + // Selector for a ServiceAccount in cloudplatform to populate serviceAccount. + // +kubebuilder:validation:Optional + ServiceAccountSelector *v1.Selector `json:"serviceAccountSelector,omitempty" tf:"-"` + + // Shielded Instance Config for clusters using Compute Engine Shielded VMs. + // +kubebuilder:validation:Optional + ShieldedInstanceConfig []ShieldedInstanceConfigParameters `json:"shieldedInstanceConfig,omitempty" tf:"shielded_instance_config,omitempty"` + + // The name or self_link of the Google Compute Engine + // subnetwork the cluster will be part of. Conflicts with network. + // +kubebuilder:validation:Optional + Subnetwork *string `json:"subnetwork,omitempty" tf:"subnetwork,omitempty"` + + // The list of instance tags applied to instances in the cluster. + // Tags are used to identify valid sources or targets for network firewalls. + // +kubebuilder:validation:Optional + Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // The GCP zone where your data is stored and used (i.e. where + // the master and the worker nodes will be created in). If region is set to 'global' (default) + // then zone is mandatory, otherwise GCP is able to make use of Auto Zone Placement + // to determine this automatically for you. + // Note: This setting additionally determines and restricts + // which computing resources are available for use with other configs such as + // cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type. + // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type GkeClusterConfigObservation struct { +} + +type GkeClusterConfigParameters struct { + + // A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster + // (the GKE cluster can be zonal or regional) + // +kubebuilder:validation:Optional + GkeClusterTarget *string `json:"gkeClusterTarget,omitempty" tf:"gke_cluster_target,omitempty"` + + // GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT + // GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. + // Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings. + // +kubebuilder:validation:Optional + NodePoolTarget []NodePoolTargetParameters `json:"nodePoolTarget,omitempty" tf:"node_pool_target,omitempty"` +} + +type InitializationActionObservation struct { +} + +type InitializationActionParameters struct { + + // The script to be executed during initialization of the cluster. + // The script must be a GCS file with a gs:// prefix. + // +kubebuilder:validation:Required + Script *string `json:"script" tf:"script,omitempty"` + + // The maximum duration (in seconds) which script is + // allowed to take to execute its action. GCP will default to a predetermined + // computed value if not set (currently 300). + // +kubebuilder:validation:Optional + TimeoutSec *float64 `json:"timeoutSec,omitempty" tf:"timeout_sec,omitempty"` +} + +type KerberosConfigObservation struct { +} + +type KerberosConfigParameters struct { + + // The admin server (IP or hostname) for the + // remote trusted realm in a cross realm trust relationship. 
+ // +kubebuilder:validation:Optional + CrossRealmTrustAdminServer *string `json:"crossRealmTrustAdminServer,omitempty" tf:"cross_realm_trust_admin_server,omitempty"` + + // The KDC (IP or hostname) for the + // remote trusted realm in a cross realm trust relationship. + // +kubebuilder:validation:Optional + CrossRealmTrustKdc *string `json:"crossRealmTrustKdc,omitempty" tf:"cross_realm_trust_kdc,omitempty"` + + // The remote realm the Dataproc on-cluster KDC will + // trust, should the user enable cross realm trust. + // +kubebuilder:validation:Optional + CrossRealmTrustRealm *string `json:"crossRealmTrustRealm,omitempty" tf:"cross_realm_trust_realm,omitempty"` + + // The Cloud Storage URI of a KMS + // encrypted file containing the shared password between the on-cluster Kerberos realm + // and the remote trusted realm, in a cross realm trust relationship. + // +kubebuilder:validation:Optional + CrossRealmTrustSharedPasswordURI *string `json:"crossRealmTrustSharedPasswordUri,omitempty" tf:"cross_realm_trust_shared_password_uri,omitempty"` + + // Flag to indicate whether to Kerberize the cluster. + // +kubebuilder:validation:Optional + EnableKerberos *bool `json:"enableKerberos,omitempty" tf:"enable_kerberos,omitempty"` + + // The URI of the KMS key used to encrypt various sensitive files. + // +kubebuilder:validation:Required + KMSKeyURI *string `json:"kmsKeyUri" tf:"kms_key_uri,omitempty"` + + // The Cloud Storage URI of a KMS encrypted file containing + // the master key of the KDC database. + // +kubebuilder:validation:Optional + KdcDBKeyURI *string `json:"kdcDbKeyUri,omitempty" tf:"kdc_db_key_uri,omitempty"` + + // The Cloud Storage URI of a KMS encrypted file containing + // the password to the user provided key. For the self-signed certificate, this password + // is generated by Dataproc. + // +kubebuilder:validation:Optional + KeyPasswordURI *string `json:"keyPasswordUri,omitempty" tf:"key_password_uri,omitempty"` + + // The Cloud Storage URI of a KMS encrypted file containing + // the password to the user provided keystore. For the self-signed certificated, the password + // is generated by Dataproc. + // +kubebuilder:validation:Optional + KeystorePasswordURI *string `json:"keystorePasswordUri,omitempty" tf:"keystore_password_uri,omitempty"` + + // The Cloud Storage URI of the keystore file used for SSL encryption. + // If not provided, Dataproc will provide a self-signed certificate. + // +kubebuilder:validation:Optional + KeystoreURI *string `json:"keystoreUri,omitempty" tf:"keystore_uri,omitempty"` + + // The name of the on-cluster Kerberos realm. If not specified, the + // uppercased domain of hostnames will be the realm. + // +kubebuilder:validation:Optional + Realm *string `json:"realm,omitempty" tf:"realm,omitempty"` + + // The Cloud Storage URI of a KMS encrypted file + // containing the root principal password. + // +kubebuilder:validation:Required + RootPrincipalPasswordURI *string `json:"rootPrincipalPasswordUri" tf:"root_principal_password_uri,omitempty"` + + // The lifetime of the ticket granting ticket, in hours. + // +kubebuilder:validation:Optional + TgtLifetimeHours *float64 `json:"tgtLifetimeHours,omitempty" tf:"tgt_lifetime_hours,omitempty"` + + // The Cloud Storage URI of a KMS encrypted file + // containing the password to the user provided truststore. For the self-signed + // certificate, this password is generated by Dataproc. 
+ // +kubebuilder:validation:Optional + TruststorePasswordURI *string `json:"truststorePasswordUri,omitempty" tf:"truststore_password_uri,omitempty"` + + // The Cloud Storage URI of the truststore file used for + // SSL encryption. If not provided, Dataproc will provide a self-signed certificate. + // +kubebuilder:validation:Optional + TruststoreURI *string `json:"truststoreUri,omitempty" tf:"truststore_uri,omitempty"` +} + +type KubernetesClusterConfigObservation struct { +} + +type KubernetesClusterConfigParameters struct { + + // The configuration for running the Dataproc cluster on GKE. + // +kubebuilder:validation:Required + GkeClusterConfig []GkeClusterConfigParameters `json:"gkeClusterConfig" tf:"gke_cluster_config,omitempty"` + + // A namespace within the Kubernetes cluster to deploy into. + // If this namespace does not exist, it is created. + // If it exists, Dataproc verifies that another Dataproc VirtualCluster is not installed into it. + // If not specified, the name of the Dataproc Cluster is used. + // +kubebuilder:validation:Optional + KubernetesNamespace *string `json:"kubernetesNamespace,omitempty" tf:"kubernetes_namespace,omitempty"` + + // The software configuration for this Dataproc cluster running on Kubernetes. + // +kubebuilder:validation:Required + KubernetesSoftwareConfig []KubernetesSoftwareConfigParameters `json:"kubernetesSoftwareConfig" tf:"kubernetes_software_config,omitempty"` +} + +type KubernetesSoftwareConfigObservation struct { +} + +type KubernetesSoftwareConfigParameters struct { + + // The components that should be installed in this Dataproc cluster. The key must be a string from the + // KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified. + // +kubebuilder:validation:Required + ComponentVersion map[string]*string `json:"componentVersion" tf:"component_version,omitempty"` + + // The properties to set on daemon config files. Property keys are specified in prefix:property format, + // for example spark:spark.kubernetes.container.image. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` +} + +type LifecycleConfigObservation struct { + + // Time when the cluster became idle + // (most recent job finished) and became eligible for deletion due to idleness. + IdleStartTime *string `json:"idleStartTime,omitempty" tf:"idle_start_time,omitempty"` +} + +type LifecycleConfigParameters struct { + + // The time when cluster will be auto-deleted. + // A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. + // Example: "2014-10-02T15:01:23.045123456Z". + // +kubebuilder:validation:Optional + AutoDeleteTime *string `json:"autoDeleteTime,omitempty" tf:"auto_delete_time,omitempty"` + + // The duration to keep the cluster alive while idling + // (no jobs running). After this TTL, the cluster will be deleted. Valid range: [10m, 14d]. + // +kubebuilder:validation:Optional + IdleDeleteTTL *string `json:"idleDeleteTtl,omitempty" tf:"idle_delete_ttl,omitempty"` +} + +type MasterConfigObservation struct { + + // List of worker instance names which have been assigned + // to the cluster. + InstanceNames []*string `json:"instanceNames,omitempty" tf:"instance_names,omitempty"` +} + +type MasterConfigParameters struct { + + // The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times. 
+ // +kubebuilder:validation:Optional + Accelerators []AcceleratorsParameters `json:"accelerators,omitempty" tf:"accelerators,omitempty"` + + // Disk Config + // +kubebuilder:validation:Optional + DiskConfig []DiskConfigParameters `json:"diskConfig,omitempty" tf:"disk_config,omitempty"` + + // The URI for the image to use for this worker. See the guide + // for more information. + // +kubebuilder:validation:Optional + ImageURI *string `json:"imageUri,omitempty" tf:"image_uri,omitempty"` + + // The name of a Google Compute Engine machine type + // to create for the master. If not specified, GCP will default to a predetermined + // computed value (currently n1-standard-4). + // +kubebuilder:validation:Optional + MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` + + // The name of a minimum generation of CPU family + // for the master. If not specified, GCP will default to a predetermined computed value + // for each zone. See the guide + // for details about which CPU families are available (and defaulted) for each zone. + // +kubebuilder:validation:Optional + MinCPUPlatform *string `json:"minCpuPlatform,omitempty" tf:"min_cpu_platform,omitempty"` + + // Specifies the number of master nodes to create. + // If not specified, GCP will default to a predetermined computed value (currently 1). + // +kubebuilder:validation:Optional + NumInstances *float64 `json:"numInstances,omitempty" tf:"num_instances,omitempty"` +} + +type MetastoreConfigObservation struct { +} + +type MetastoreConfigParameters struct { + + // Resource name of an existing Dataproc Metastore service. + // +kubebuilder:validation:Required + DataprocMetastoreService *string `json:"dataprocMetastoreService" tf:"dataproc_metastore_service,omitempty"` +} + +type NodePoolConfigObservation struct { +} + +type NodePoolConfigParameters struct { + + // The autoscaler configuration for this node pool. + // The autoscaler is enabled only when a valid configuration is present. + // +kubebuilder:validation:Optional + Autoscaling []AutoscalingParameters `json:"autoscaling,omitempty" tf:"autoscaling,omitempty"` + + // The node pool configuration. + // +kubebuilder:validation:Optional + Config []ConfigParameters `json:"config,omitempty" tf:"config,omitempty"` + + // The list of Compute Engine zones where node pool nodes associated + // with a Dataproc on GKE virtual cluster will be located. + // +kubebuilder:validation:Required + Locations []*string `json:"locations" tf:"locations,omitempty"` +} + +type NodePoolTargetObservation struct { +} + +type NodePoolTargetParameters struct { + + // The target GKE node pool. + // +kubebuilder:validation:Required + NodePool *string `json:"nodePool" tf:"node_pool,omitempty"` + + // (Input only) The configuration for the GKE node pool. + // If specified, Dataproc attempts to create a node pool with the specified shape. + // If one with the same name already exists, it is verified against all specified fields. + // If a field differs, the virtual cluster creation will fail. + // +kubebuilder:validation:Optional + NodePoolConfig []NodePoolConfigParameters `json:"nodePoolConfig,omitempty" tf:"node_pool_config,omitempty"` + + // The roles associated with the GKE node pool. + // One of "DEFAULT", "CONTROLLER", "SPARK_DRIVER" or "SPARK_EXECUTOR". 
+ // +kubebuilder:validation:Required + Roles []*string `json:"roles" tf:"roles,omitempty"` +} + +type PreemptibleWorkerConfigDiskConfigObservation struct { +} + +type PreemptibleWorkerConfigDiskConfigParameters struct { + + // Size of the primary disk attached to each node, specified + // in GB. The primary disk contains the boot volume and system libraries, and the + // smallest allowed disk size is 10GB. GCP will default to a predetermined + // computed value if not set (currently 500GB). Note: If SSDs are not + // attached, it also contains the HDFS data blocks and Hadoop working directories. + // +kubebuilder:validation:Optional + BootDiskSizeGb *float64 `json:"bootDiskSizeGb,omitempty" tf:"boot_disk_size_gb,omitempty"` + + // The disk type of the primary disk attached to each node. + // One of "pd-ssd" or "pd-standard". Defaults to "pd-standard". + // +kubebuilder:validation:Optional + BootDiskType *string `json:"bootDiskType,omitempty" tf:"boot_disk_type,omitempty"` + + // The amount of local SSD disks that will be + // attached to each master cluster node. Defaults to 0. + // +kubebuilder:validation:Optional + NumLocalSsds *float64 `json:"numLocalSsds,omitempty" tf:"num_local_ssds,omitempty"` +} + +type PreemptibleWorkerConfigObservation struct { + + // List of worker instance names which have been assigned + // to the cluster. + InstanceNames []*string `json:"instanceNames,omitempty" tf:"instance_names,omitempty"` +} + +type PreemptibleWorkerConfigParameters struct { + + // Disk Config + // +kubebuilder:validation:Optional + DiskConfig []PreemptibleWorkerConfigDiskConfigParameters `json:"diskConfig,omitempty" tf:"disk_config,omitempty"` + + // Specifies the number of preemptible nodes to create. + // Defaults to 0. + // +kubebuilder:validation:Optional + NumInstances *float64 `json:"numInstances,omitempty" tf:"num_instances,omitempty"` + + // Specifies the preemptibility of the secondary workers. The default value is PREEMPTIBLE + // Accepted values are: + // +kubebuilder:validation:Optional + Preemptibility *string `json:"preemptibility,omitempty" tf:"preemptibility,omitempty"` +} + +type SecurityConfigObservation struct { +} + +type SecurityConfigParameters struct { + + // Kerberos Configuration + // +kubebuilder:validation:Required + KerberosConfig []KerberosConfigParameters `json:"kerberosConfig" tf:"kerberos_config,omitempty"` +} + +type ShieldedInstanceConfigObservation struct { +} + +type ShieldedInstanceConfigParameters struct { + + // Defines whether instances have integrity monitoring enabled. + // +kubebuilder:validation:Optional + EnableIntegrityMonitoring *bool `json:"enableIntegrityMonitoring,omitempty" tf:"enable_integrity_monitoring,omitempty"` + + // Defines whether instances have Secure Boot enabled. + // +kubebuilder:validation:Optional + EnableSecureBoot *bool `json:"enableSecureBoot,omitempty" tf:"enable_secure_boot,omitempty"` + + // Defines whether instances have the vTPM enabled. + // +kubebuilder:validation:Optional + EnableVtpm *bool `json:"enableVtpm,omitempty" tf:"enable_vtpm,omitempty"` +} + +type SoftwareConfigObservation struct { + + // The properties to set on daemon config files. Property keys are specified in prefix:property format, + // for example spark:spark.kubernetes.container.image. 
+ Properties map[string]string `json:"properties,omitempty" tf:"properties,omitempty"` +} + +type SoftwareConfigParameters struct { + + // The Cloud Dataproc image version to use + // for the cluster - this controls the sets of software versions + // installed onto the nodes when you create clusters. If not specified, defaults to the + // latest version. For a list of valid versions see + // Cloud Dataproc versions + // +kubebuilder:validation:Optional + ImageVersion *string `json:"imageVersion,omitempty" tf:"image_version,omitempty"` + + // The set of optional components to activate on the cluster. + // Accepted values are: + // +kubebuilder:validation:Optional + OptionalComponents []*string `json:"optionalComponents,omitempty" tf:"optional_components,omitempty"` + + // A list of override and additional properties (key/value pairs) + // used to modify various aspects of the common configuration files used when creating + // a cluster. For a list of valid properties please see + // Cluster properties + // +kubebuilder:validation:Optional + OverrideProperties map[string]*string `json:"overrideProperties,omitempty" tf:"override_properties,omitempty"` +} + +type SparkHistoryServerConfigObservation struct { +} + +type SparkHistoryServerConfigParameters struct { + + // Resource name of an existing Dataproc Cluster to act as a Spark History Server for the workload. + // +kubebuilder:validation:Optional + DataprocCluster *string `json:"dataprocCluster,omitempty" tf:"dataproc_cluster,omitempty"` +} + +type VirtualClusterConfigObservation struct { +} + +type VirtualClusterConfigParameters struct { + + // Configuration of auxiliary services used by this cluster. + // Structure defined below. + // +kubebuilder:validation:Optional + AuxiliaryServicesConfig []AuxiliaryServicesConfigParameters `json:"auxiliaryServicesConfig,omitempty" tf:"auxiliary_services_config,omitempty"` + + // The configuration for running the Dataproc cluster on Kubernetes. + // Structure defined below. + // +kubebuilder:validation:Optional + KubernetesClusterConfig []KubernetesClusterConfigParameters `json:"kubernetesClusterConfig,omitempty" tf:"kubernetes_cluster_config,omitempty"` + + // The Cloud Storage staging bucket used to stage files, + // such as Hadoop jars, between client machines and the cluster. + // Note: If you don't explicitly specify a staging_bucket + // then GCP will auto create / assign one for you. However, you are not guaranteed + // an auto generated bucket which is solely dedicated to your cluster; it may be shared + // with other clusters in the same region/zone also choosing to use the auto generation + // option. + // +kubebuilder:validation:Optional + StagingBucket *string `json:"stagingBucket,omitempty" tf:"staging_bucket,omitempty"` +} + +type WorkerConfigAcceleratorsObservation struct { +} + +type WorkerConfigAcceleratorsParameters struct { + + // The number of the accelerator cards of this type exposed to this instance. Often restricted to one of 1, 2, 4, or 8. + // +kubebuilder:validation:Required + AcceleratorCount *float64 `json:"acceleratorCount" tf:"accelerator_count,omitempty"` + + // The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80. + // +kubebuilder:validation:Required + AcceleratorType *string `json:"acceleratorType" tf:"accelerator_type,omitempty"` +} + +type WorkerConfigDiskConfigObservation struct { +} + +type WorkerConfigDiskConfigParameters struct { + + // Size of the primary disk attached to each node, specified + // in GB. 
The primary disk contains the boot volume and system libraries, and the + // smallest allowed disk size is 10GB. GCP will default to a predetermined + // computed value if not set (currently 500GB). Note: If SSDs are not + // attached, it also contains the HDFS data blocks and Hadoop working directories. + // +kubebuilder:validation:Optional + BootDiskSizeGb *float64 `json:"bootDiskSizeGb,omitempty" tf:"boot_disk_size_gb,omitempty"` + + // The disk type of the primary disk attached to each node. + // One of "pd-ssd" or "pd-standard". Defaults to "pd-standard". + // +kubebuilder:validation:Optional + BootDiskType *string `json:"bootDiskType,omitempty" tf:"boot_disk_type,omitempty"` + + // The amount of local SSD disks that will be + // attached to each master cluster node. Defaults to 0. + // +kubebuilder:validation:Optional + NumLocalSsds *float64 `json:"numLocalSsds,omitempty" tf:"num_local_ssds,omitempty"` +} + +// ClusterSpec defines the desired state of Cluster +type ClusterSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider ClusterParameters `json:"forProvider"` +} + +// ClusterStatus defines the observed state of Cluster. +type ClusterStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider ClusterObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// Cluster is the Schema for the Clusters API. Manages a Cloud Dataproc cluster resource. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,gcp} +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec ClusterSpec `json:"spec"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterList contains a list of Clusters +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +// Repository type metadata. +var ( + Cluster_Kind = "Cluster" + Cluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Cluster_Kind}.String() + Cluster_KindAPIVersion = Cluster_Kind + "." + CRDGroupVersion.String() + Cluster_GroupVersionKind = CRDGroupVersion.WithKind(Cluster_Kind) +) + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/apis/dataproc/v1beta1/zz_generated.deepcopy.go b/apis/dataproc/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..bc7ef66bb --- /dev/null +++ b/apis/dataproc/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,7112 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "github.com/crossplane/crossplane-runtime/apis/common/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorsObservation) DeepCopyInto(out *AcceleratorsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorsObservation. +func (in *AcceleratorsObservation) DeepCopy() *AcceleratorsObservation { + if in == nil { + return nil + } + out := new(AcceleratorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcceleratorsParameters) DeepCopyInto(out *AcceleratorsParameters) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(float64) + **out = **in + } + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcceleratorsParameters. +func (in *AcceleratorsParameters) DeepCopy() *AcceleratorsParameters { + if in == nil { + return nil + } + out := new(AcceleratorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingConfigObservation) DeepCopyInto(out *AutoscalingConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingConfigObservation. +func (in *AutoscalingConfigObservation) DeepCopy() *AutoscalingConfigObservation { + if in == nil { + return nil + } + out := new(AutoscalingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingConfigParameters) DeepCopyInto(out *AutoscalingConfigParameters) { + *out = *in + if in.PolicyURI != nil { + in, out := &in.PolicyURI, &out.PolicyURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingConfigParameters. +func (in *AutoscalingConfigParameters) DeepCopy() *AutoscalingConfigParameters { + if in == nil { + return nil + } + out := new(AutoscalingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingObservation) DeepCopyInto(out *AutoscalingObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingObservation. +func (in *AutoscalingObservation) DeepCopy() *AutoscalingObservation { + if in == nil { + return nil + } + out := new(AutoscalingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscalingParameters) DeepCopyInto(out *AutoscalingParameters) { + *out = *in + if in.MaxNodeCount != nil { + in, out := &in.MaxNodeCount, &out.MaxNodeCount + *out = new(float64) + **out = **in + } + if in.MinNodeCount != nil { + in, out := &in.MinNodeCount, &out.MinNodeCount + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingParameters. +func (in *AutoscalingParameters) DeepCopy() *AutoscalingParameters { + if in == nil { + return nil + } + out := new(AutoscalingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingPolicy) DeepCopyInto(out *AutoscalingPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingPolicy. +func (in *AutoscalingPolicy) DeepCopy() *AutoscalingPolicy { + if in == nil { + return nil + } + out := new(AutoscalingPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutoscalingPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingPolicyList) DeepCopyInto(out *AutoscalingPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AutoscalingPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingPolicyList. +func (in *AutoscalingPolicyList) DeepCopy() *AutoscalingPolicyList { + if in == nil { + return nil + } + out := new(AutoscalingPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutoscalingPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingPolicyObservation) DeepCopyInto(out *AutoscalingPolicyObservation) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingPolicyObservation. +func (in *AutoscalingPolicyObservation) DeepCopy() *AutoscalingPolicyObservation { + if in == nil { + return nil + } + out := new(AutoscalingPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscalingPolicyParameters) DeepCopyInto(out *AutoscalingPolicyParameters) { + *out = *in + if in.BasicAlgorithm != nil { + in, out := &in.BasicAlgorithm, &out.BasicAlgorithm + *out = make([]BasicAlgorithmParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.SecondaryWorkerConfig != nil { + in, out := &in.SecondaryWorkerConfig, &out.SecondaryWorkerConfig + *out = make([]SecondaryWorkerConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WorkerConfig != nil { + in, out := &in.WorkerConfig, &out.WorkerConfig + *out = make([]WorkerConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingPolicyParameters. +func (in *AutoscalingPolicyParameters) DeepCopy() *AutoscalingPolicyParameters { + if in == nil { + return nil + } + out := new(AutoscalingPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingPolicySpec) DeepCopyInto(out *AutoscalingPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingPolicySpec. +func (in *AutoscalingPolicySpec) DeepCopy() *AutoscalingPolicySpec { + if in == nil { + return nil + } + out := new(AutoscalingPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoscalingPolicyStatus) DeepCopyInto(out *AutoscalingPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingPolicyStatus. +func (in *AutoscalingPolicyStatus) DeepCopy() *AutoscalingPolicyStatus { + if in == nil { + return nil + } + out := new(AutoscalingPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuxiliaryServicesConfigMetastoreConfigObservation) DeepCopyInto(out *AuxiliaryServicesConfigMetastoreConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuxiliaryServicesConfigMetastoreConfigObservation. +func (in *AuxiliaryServicesConfigMetastoreConfigObservation) DeepCopy() *AuxiliaryServicesConfigMetastoreConfigObservation { + if in == nil { + return nil + } + out := new(AuxiliaryServicesConfigMetastoreConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuxiliaryServicesConfigMetastoreConfigParameters) DeepCopyInto(out *AuxiliaryServicesConfigMetastoreConfigParameters) { + *out = *in + if in.DataprocMetastoreService != nil { + in, out := &in.DataprocMetastoreService, &out.DataprocMetastoreService + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuxiliaryServicesConfigMetastoreConfigParameters. +func (in *AuxiliaryServicesConfigMetastoreConfigParameters) DeepCopy() *AuxiliaryServicesConfigMetastoreConfigParameters { + if in == nil { + return nil + } + out := new(AuxiliaryServicesConfigMetastoreConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuxiliaryServicesConfigObservation) DeepCopyInto(out *AuxiliaryServicesConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuxiliaryServicesConfigObservation. +func (in *AuxiliaryServicesConfigObservation) DeepCopy() *AuxiliaryServicesConfigObservation { + if in == nil { + return nil + } + out := new(AuxiliaryServicesConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuxiliaryServicesConfigParameters) DeepCopyInto(out *AuxiliaryServicesConfigParameters) { + *out = *in + if in.MetastoreConfig != nil { + in, out := &in.MetastoreConfig, &out.MetastoreConfig + *out = make([]AuxiliaryServicesConfigMetastoreConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SparkHistoryServerConfig != nil { + in, out := &in.SparkHistoryServerConfig, &out.SparkHistoryServerConfig + *out = make([]SparkHistoryServerConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuxiliaryServicesConfigParameters. +func (in *AuxiliaryServicesConfigParameters) DeepCopy() *AuxiliaryServicesConfigParameters { + if in == nil { + return nil + } + out := new(AuxiliaryServicesConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAlgorithmObservation) DeepCopyInto(out *BasicAlgorithmObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAlgorithmObservation. +func (in *BasicAlgorithmObservation) DeepCopy() *BasicAlgorithmObservation { + if in == nil { + return nil + } + out := new(BasicAlgorithmObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAlgorithmParameters) DeepCopyInto(out *BasicAlgorithmParameters) { + *out = *in + if in.CooldownPeriod != nil { + in, out := &in.CooldownPeriod, &out.CooldownPeriod + *out = new(string) + **out = **in + } + if in.YarnConfig != nil { + in, out := &in.YarnConfig, &out.YarnConfig + *out = make([]YarnConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAlgorithmParameters. 
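+// Illustrative usage sketch (hand-written, not produced by upjet or controller-gen):
+// callers rely on DeepCopy to obtain a value that shares no memory with the receiver,
+// so mutating the copy never leaks back into the original, e.g.
+//
+//	cooldown := "120s"
+//	orig := &BasicAlgorithmParameters{CooldownPeriod: &cooldown}
+//	cp := orig.DeepCopy()
+//	*cp.CooldownPeriod = "240s" // orig.CooldownPeriod still reads "120s"
+//
+// This holds because DeepCopyInto re-allocates every pointer, slice and map field
+// instead of copying the pointers themselves.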
+func (in *BasicAlgorithmParameters) DeepCopy() *BasicAlgorithmParameters { + if in == nil { + return nil + } + out := new(BasicAlgorithmParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigObservation) DeepCopyInto(out *ClusterConfigObservation) { + *out = *in + if in.Bucket != nil { + in, out := &in.Bucket, &out.Bucket + *out = new(string) + **out = **in + } + if in.EndpointConfig != nil { + in, out := &in.EndpointConfig, &out.EndpointConfig + *out = make([]EndpointConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LifecycleConfig != nil { + in, out := &in.LifecycleConfig, &out.LifecycleConfig + *out = make([]LifecycleConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MasterConfig != nil { + in, out := &in.MasterConfig, &out.MasterConfig + *out = make([]MasterConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PreemptibleWorkerConfig != nil { + in, out := &in.PreemptibleWorkerConfig, &out.PreemptibleWorkerConfig + *out = make([]PreemptibleWorkerConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SoftwareConfig != nil { + in, out := &in.SoftwareConfig, &out.SoftwareConfig + *out = make([]SoftwareConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WorkerConfig != nil { + in, out := &in.WorkerConfig, &out.WorkerConfig + *out = make([]ClusterConfigWorkerConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigObservation. +func (in *ClusterConfigObservation) DeepCopy() *ClusterConfigObservation { + if in == nil { + return nil + } + out := new(ClusterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterConfigParameters) DeepCopyInto(out *ClusterConfigParameters) { + *out = *in + if in.AutoscalingConfig != nil { + in, out := &in.AutoscalingConfig, &out.AutoscalingConfig + *out = make([]AutoscalingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionConfig != nil { + in, out := &in.EncryptionConfig, &out.EncryptionConfig + *out = make([]EncryptionConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EndpointConfig != nil { + in, out := &in.EndpointConfig, &out.EndpointConfig + *out = make([]EndpointConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GceClusterConfig != nil { + in, out := &in.GceClusterConfig, &out.GceClusterConfig + *out = make([]GceClusterConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitializationAction != nil { + in, out := &in.InitializationAction, &out.InitializationAction + *out = make([]InitializationActionParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LifecycleConfig != nil { + in, out := &in.LifecycleConfig, &out.LifecycleConfig + *out = make([]LifecycleConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MasterConfig != nil { + in, out := &in.MasterConfig, &out.MasterConfig + *out = make([]MasterConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MetastoreConfig != nil { + in, out := &in.MetastoreConfig, &out.MetastoreConfig + *out = make([]MetastoreConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PreemptibleWorkerConfig != nil { + in, out := &in.PreemptibleWorkerConfig, &out.PreemptibleWorkerConfig + *out = make([]PreemptibleWorkerConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityConfig != nil { + in, out := &in.SecurityConfig, &out.SecurityConfig + *out = make([]SecurityConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SoftwareConfig != nil { + in, out := &in.SoftwareConfig, &out.SoftwareConfig + *out = make([]SoftwareConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StagingBucket != nil { + in, out := &in.StagingBucket, &out.StagingBucket + *out = new(string) + **out = **in + } + if in.TempBucket != nil { + in, out := &in.TempBucket, &out.TempBucket + *out = new(string) + **out = **in + } + if in.WorkerConfig != nil { + in, out := &in.WorkerConfig, &out.WorkerConfig + *out = make([]ClusterConfigWorkerConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigParameters. +func (in *ClusterConfigParameters) DeepCopy() *ClusterConfigParameters { + if in == nil { + return nil + } + out := new(ClusterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterConfigWorkerConfigObservation) DeepCopyInto(out *ClusterConfigWorkerConfigObservation) { + *out = *in + if in.InstanceNames != nil { + in, out := &in.InstanceNames, &out.InstanceNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigWorkerConfigObservation. +func (in *ClusterConfigWorkerConfigObservation) DeepCopy() *ClusterConfigWorkerConfigObservation { + if in == nil { + return nil + } + out := new(ClusterConfigWorkerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfigWorkerConfigParameters) DeepCopyInto(out *ClusterConfigWorkerConfigParameters) { + *out = *in + if in.Accelerators != nil { + in, out := &in.Accelerators, &out.Accelerators + *out = make([]WorkerConfigAcceleratorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DiskConfig != nil { + in, out := &in.DiskConfig, &out.DiskConfig + *out = make([]WorkerConfigDiskConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ImageURI != nil { + in, out := &in.ImageURI, &out.ImageURI + *out = new(string) + **out = **in + } + if in.MachineType != nil { + in, out := &in.MachineType, &out.MachineType + *out = new(string) + **out = **in + } + if in.MinCPUPlatform != nil { + in, out := &in.MinCPUPlatform, &out.MinCPUPlatform + *out = new(string) + **out = **in + } + if in.NumInstances != nil { + in, out := &in.NumInstances, &out.NumInstances + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigWorkerConfigParameters. +func (in *ClusterConfigWorkerConfigParameters) DeepCopy() *ClusterConfigWorkerConfigParameters { + if in == nil { + return nil + } + out := new(ClusterConfigWorkerConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation) { + *out = *in + if in.ClusterConfig != nil { + in, out := &in.ClusterConfig, &out.ClusterConfig + *out = make([]ClusterConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservation. +func (in *ClusterObservation) DeepCopy() *ClusterObservation { + if in == nil { + return nil + } + out := new(ClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters) { + *out = *in + if in.ClusterConfig != nil { + in, out := &in.ClusterConfig, &out.ClusterConfig + *out = make([]ClusterConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GracefulDecommissionTimeout != nil { + in, out := &in.GracefulDecommissionTimeout, &out.GracefulDecommissionTimeout + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.VirtualClusterConfig != nil { + in, out := &in.VirtualClusterConfig, &out.VirtualClusterConfig + *out = make([]VirtualClusterConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters. +func (in *ClusterParameters) DeepCopy() *ClusterParameters { + if in == nil { + return nil + } + out := new(ClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSelectorObservation) DeepCopyInto(out *ClusterSelectorObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSelectorObservation. +func (in *ClusterSelectorObservation) DeepCopy() *ClusterSelectorObservation { + if in == nil { + return nil + } + out := new(ClusterSelectorObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterSelectorParameters) DeepCopyInto(out *ClusterSelectorParameters) { + *out = *in + if in.ClusterLabels != nil { + in, out := &in.ClusterLabels, &out.ClusterLabels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSelectorParameters. +func (in *ClusterSelectorParameters) DeepCopy() *ClusterSelectorParameters { + if in == nil { + return nil + } + out := new(ClusterSelectorParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigAutoscalingConfigObservation) DeepCopyInto(out *ConfigAutoscalingConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigAutoscalingConfigObservation. +func (in *ConfigAutoscalingConfigObservation) DeepCopy() *ConfigAutoscalingConfigObservation { + if in == nil { + return nil + } + out := new(ConfigAutoscalingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigAutoscalingConfigParameters) DeepCopyInto(out *ConfigAutoscalingConfigParameters) { + *out = *in + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigAutoscalingConfigParameters. +func (in *ConfigAutoscalingConfigParameters) DeepCopy() *ConfigAutoscalingConfigParameters { + if in == nil { + return nil + } + out := new(ConfigAutoscalingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigEncryptionConfigObservation) DeepCopyInto(out *ConfigEncryptionConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigEncryptionConfigObservation. 
+func (in *ConfigEncryptionConfigObservation) DeepCopy() *ConfigEncryptionConfigObservation { + if in == nil { + return nil + } + out := new(ConfigEncryptionConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigEncryptionConfigParameters) DeepCopyInto(out *ConfigEncryptionConfigParameters) { + *out = *in + if in.GcePdKMSKeyName != nil { + in, out := &in.GcePdKMSKeyName, &out.GcePdKMSKeyName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigEncryptionConfigParameters. +func (in *ConfigEncryptionConfigParameters) DeepCopy() *ConfigEncryptionConfigParameters { + if in == nil { + return nil + } + out := new(ConfigEncryptionConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigEndpointConfigObservation) DeepCopyInto(out *ConfigEndpointConfigObservation) { + *out = *in + if in.HTTPPorts != nil { + in, out := &in.HTTPPorts, &out.HTTPPorts + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigEndpointConfigObservation. +func (in *ConfigEndpointConfigObservation) DeepCopy() *ConfigEndpointConfigObservation { + if in == nil { + return nil + } + out := new(ConfigEndpointConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigEndpointConfigParameters) DeepCopyInto(out *ConfigEndpointConfigParameters) { + *out = *in + if in.EnableHTTPPortAccess != nil { + in, out := &in.EnableHTTPPortAccess, &out.EnableHTTPPortAccess + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigEndpointConfigParameters. +func (in *ConfigEndpointConfigParameters) DeepCopy() *ConfigEndpointConfigParameters { + if in == nil { + return nil + } + out := new(ConfigEndpointConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigGceClusterConfigObservation) DeepCopyInto(out *ConfigGceClusterConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigGceClusterConfigObservation. +func (in *ConfigGceClusterConfigObservation) DeepCopy() *ConfigGceClusterConfigObservation { + if in == nil { + return nil + } + out := new(ConfigGceClusterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigGceClusterConfigParameters) DeepCopyInto(out *ConfigGceClusterConfigParameters) { + *out = *in + if in.InternalIPOnly != nil { + in, out := &in.InternalIPOnly, &out.InternalIPOnly + *out = new(bool) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(string) + **out = **in + } + if in.NodeGroupAffinity != nil { + in, out := &in.NodeGroupAffinity, &out.NodeGroupAffinity + *out = make([]NodeGroupAffinityParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrivateIPv6GoogleAccess != nil { + in, out := &in.PrivateIPv6GoogleAccess, &out.PrivateIPv6GoogleAccess + *out = new(string) + **out = **in + } + if in.ReservationAffinity != nil { + in, out := &in.ReservationAffinity, &out.ReservationAffinity + *out = make([]ReservationAffinityParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceAccount != nil { + in, out := &in.ServiceAccount, &out.ServiceAccount + *out = new(string) + **out = **in + } + if in.ServiceAccountScopes != nil { + in, out := &in.ServiceAccountScopes, &out.ServiceAccountScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ShieldedInstanceConfig != nil { + in, out := &in.ShieldedInstanceConfig, &out.ShieldedInstanceConfig + *out = make([]GceClusterConfigShieldedInstanceConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Subnetwork != nil { + in, out := &in.Subnetwork, &out.Subnetwork + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigGceClusterConfigParameters. +func (in *ConfigGceClusterConfigParameters) DeepCopy() *ConfigGceClusterConfigParameters { + if in == nil { + return nil + } + out := new(ConfigGceClusterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigLifecycleConfigObservation) DeepCopyInto(out *ConfigLifecycleConfigObservation) { + *out = *in + if in.IdleStartTime != nil { + in, out := &in.IdleStartTime, &out.IdleStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigLifecycleConfigObservation. +func (in *ConfigLifecycleConfigObservation) DeepCopy() *ConfigLifecycleConfigObservation { + if in == nil { + return nil + } + out := new(ConfigLifecycleConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigLifecycleConfigParameters) DeepCopyInto(out *ConfigLifecycleConfigParameters) { + *out = *in + if in.AutoDeleteTTL != nil { + in, out := &in.AutoDeleteTTL, &out.AutoDeleteTTL + *out = new(string) + **out = **in + } + if in.AutoDeleteTime != nil { + in, out := &in.AutoDeleteTime, &out.AutoDeleteTime + *out = new(string) + **out = **in + } + if in.IdleDeleteTTL != nil { + in, out := &in.IdleDeleteTTL, &out.IdleDeleteTTL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigLifecycleConfigParameters. +func (in *ConfigLifecycleConfigParameters) DeepCopy() *ConfigLifecycleConfigParameters { + if in == nil { + return nil + } + out := new(ConfigLifecycleConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMasterConfigObservation) DeepCopyInto(out *ConfigMasterConfigObservation) { + *out = *in + if in.InstanceNames != nil { + in, out := &in.InstanceNames, &out.InstanceNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IsPreemptible != nil { + in, out := &in.IsPreemptible, &out.IsPreemptible + *out = new(bool) + **out = **in + } + if in.ManagedGroupConfig != nil { + in, out := &in.ManagedGroupConfig, &out.ManagedGroupConfig + *out = make([]ManagedGroupConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMasterConfigObservation. +func (in *ConfigMasterConfigObservation) DeepCopy() *ConfigMasterConfigObservation { + if in == nil { + return nil + } + out := new(ConfigMasterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMasterConfigParameters) DeepCopyInto(out *ConfigMasterConfigParameters) { + *out = *in + if in.Accelerators != nil { + in, out := &in.Accelerators, &out.Accelerators + *out = make([]MasterConfigAcceleratorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DiskConfig != nil { + in, out := &in.DiskConfig, &out.DiskConfig + *out = make([]MasterConfigDiskConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.MachineType != nil { + in, out := &in.MachineType, &out.MachineType + *out = new(string) + **out = **in + } + if in.MinCPUPlatform != nil { + in, out := &in.MinCPUPlatform, &out.MinCPUPlatform + *out = new(string) + **out = **in + } + if in.NumInstances != nil { + in, out := &in.NumInstances, &out.NumInstances + *out = new(float64) + **out = **in + } + if in.Preemptibility != nil { + in, out := &in.Preemptibility, &out.Preemptibility + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMasterConfigParameters. 
+func (in *ConfigMasterConfigParameters) DeepCopy() *ConfigMasterConfigParameters { + if in == nil { + return nil + } + out := new(ConfigMasterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigObservation) DeepCopyInto(out *ConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigObservation. +func (in *ConfigObservation) DeepCopy() *ConfigObservation { + if in == nil { + return nil + } + out := new(ConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigParameters) DeepCopyInto(out *ConfigParameters) { + *out = *in + if in.LocalSsdCount != nil { + in, out := &in.LocalSsdCount, &out.LocalSsdCount + *out = new(float64) + **out = **in + } + if in.MachineType != nil { + in, out := &in.MachineType, &out.MachineType + *out = new(string) + **out = **in + } + if in.MinCPUPlatform != nil { + in, out := &in.MinCPUPlatform, &out.MinCPUPlatform + *out = new(string) + **out = **in + } + if in.Preemptible != nil { + in, out := &in.Preemptible, &out.Preemptible + *out = new(bool) + **out = **in + } + if in.Spot != nil { + in, out := &in.Spot, &out.Spot + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigParameters. +func (in *ConfigParameters) DeepCopy() *ConfigParameters { + if in == nil { + return nil + } + out := new(ConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigSecondaryWorkerConfigObservation) DeepCopyInto(out *ConfigSecondaryWorkerConfigObservation) { + *out = *in + if in.InstanceNames != nil { + in, out := &in.InstanceNames, &out.InstanceNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IsPreemptible != nil { + in, out := &in.IsPreemptible, &out.IsPreemptible + *out = new(bool) + **out = **in + } + if in.ManagedGroupConfig != nil { + in, out := &in.ManagedGroupConfig, &out.ManagedGroupConfig + *out = make([]SecondaryWorkerConfigManagedGroupConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSecondaryWorkerConfigObservation. +func (in *ConfigSecondaryWorkerConfigObservation) DeepCopy() *ConfigSecondaryWorkerConfigObservation { + if in == nil { + return nil + } + out := new(ConfigSecondaryWorkerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigSecondaryWorkerConfigParameters) DeepCopyInto(out *ConfigSecondaryWorkerConfigParameters) { + *out = *in + if in.Accelerators != nil { + in, out := &in.Accelerators, &out.Accelerators + *out = make([]SecondaryWorkerConfigAcceleratorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DiskConfig != nil { + in, out := &in.DiskConfig, &out.DiskConfig + *out = make([]SecondaryWorkerConfigDiskConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.MachineType != nil { + in, out := &in.MachineType, &out.MachineType + *out = new(string) + **out = **in + } + if in.MinCPUPlatform != nil { + in, out := &in.MinCPUPlatform, &out.MinCPUPlatform + *out = new(string) + **out = **in + } + if in.NumInstances != nil { + in, out := &in.NumInstances, &out.NumInstances + *out = new(float64) + **out = **in + } + if in.Preemptibility != nil { + in, out := &in.Preemptibility, &out.Preemptibility + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSecondaryWorkerConfigParameters. +func (in *ConfigSecondaryWorkerConfigParameters) DeepCopy() *ConfigSecondaryWorkerConfigParameters { + if in == nil { + return nil + } + out := new(ConfigSecondaryWorkerConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigSecurityConfigObservation) DeepCopyInto(out *ConfigSecurityConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSecurityConfigObservation. +func (in *ConfigSecurityConfigObservation) DeepCopy() *ConfigSecurityConfigObservation { + if in == nil { + return nil + } + out := new(ConfigSecurityConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigSecurityConfigParameters) DeepCopyInto(out *ConfigSecurityConfigParameters) { + *out = *in + if in.KerberosConfig != nil { + in, out := &in.KerberosConfig, &out.KerberosConfig + *out = make([]SecurityConfigKerberosConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSecurityConfigParameters. +func (in *ConfigSecurityConfigParameters) DeepCopy() *ConfigSecurityConfigParameters { + if in == nil { + return nil + } + out := new(ConfigSecurityConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigSoftwareConfigObservation) DeepCopyInto(out *ConfigSoftwareConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSoftwareConfigObservation. +func (in *ConfigSoftwareConfigObservation) DeepCopy() *ConfigSoftwareConfigObservation { + if in == nil { + return nil + } + out := new(ConfigSoftwareConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigSoftwareConfigParameters) DeepCopyInto(out *ConfigSoftwareConfigParameters) { + *out = *in + if in.ImageVersion != nil { + in, out := &in.ImageVersion, &out.ImageVersion + *out = new(string) + **out = **in + } + if in.OptionalComponents != nil { + in, out := &in.OptionalComponents, &out.OptionalComponents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSoftwareConfigParameters. +func (in *ConfigSoftwareConfigParameters) DeepCopy() *ConfigSoftwareConfigParameters { + if in == nil { + return nil + } + out := new(ConfigSoftwareConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigWorkerConfigAcceleratorsObservation) DeepCopyInto(out *ConfigWorkerConfigAcceleratorsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigWorkerConfigAcceleratorsObservation. +func (in *ConfigWorkerConfigAcceleratorsObservation) DeepCopy() *ConfigWorkerConfigAcceleratorsObservation { + if in == nil { + return nil + } + out := new(ConfigWorkerConfigAcceleratorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigWorkerConfigAcceleratorsParameters) DeepCopyInto(out *ConfigWorkerConfigAcceleratorsParameters) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(float64) + **out = **in + } + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigWorkerConfigAcceleratorsParameters. +func (in *ConfigWorkerConfigAcceleratorsParameters) DeepCopy() *ConfigWorkerConfigAcceleratorsParameters { + if in == nil { + return nil + } + out := new(ConfigWorkerConfigAcceleratorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigWorkerConfigDiskConfigObservation) DeepCopyInto(out *ConfigWorkerConfigDiskConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigWorkerConfigDiskConfigObservation. +func (in *ConfigWorkerConfigDiskConfigObservation) DeepCopy() *ConfigWorkerConfigDiskConfigObservation { + if in == nil { + return nil + } + out := new(ConfigWorkerConfigDiskConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigWorkerConfigDiskConfigParameters) DeepCopyInto(out *ConfigWorkerConfigDiskConfigParameters) { + *out = *in + if in.BootDiskSizeGb != nil { + in, out := &in.BootDiskSizeGb, &out.BootDiskSizeGb + *out = new(float64) + **out = **in + } + if in.BootDiskType != nil { + in, out := &in.BootDiskType, &out.BootDiskType + *out = new(string) + **out = **in + } + if in.NumLocalSsds != nil { + in, out := &in.NumLocalSsds, &out.NumLocalSsds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigWorkerConfigDiskConfigParameters. +func (in *ConfigWorkerConfigDiskConfigParameters) DeepCopy() *ConfigWorkerConfigDiskConfigParameters { + if in == nil { + return nil + } + out := new(ConfigWorkerConfigDiskConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigWorkerConfigObservation) DeepCopyInto(out *ConfigWorkerConfigObservation) { + *out = *in + if in.InstanceNames != nil { + in, out := &in.InstanceNames, &out.InstanceNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.IsPreemptible != nil { + in, out := &in.IsPreemptible, &out.IsPreemptible + *out = new(bool) + **out = **in + } + if in.ManagedGroupConfig != nil { + in, out := &in.ManagedGroupConfig, &out.ManagedGroupConfig + *out = make([]WorkerConfigManagedGroupConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigWorkerConfigObservation. +func (in *ConfigWorkerConfigObservation) DeepCopy() *ConfigWorkerConfigObservation { + if in == nil { + return nil + } + out := new(ConfigWorkerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigWorkerConfigParameters) DeepCopyInto(out *ConfigWorkerConfigParameters) { + *out = *in + if in.Accelerators != nil { + in, out := &in.Accelerators, &out.Accelerators + *out = make([]ConfigWorkerConfigAcceleratorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DiskConfig != nil { + in, out := &in.DiskConfig, &out.DiskConfig + *out = make([]ConfigWorkerConfigDiskConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } + if in.MachineType != nil { + in, out := &in.MachineType, &out.MachineType + *out = new(string) + **out = **in + } + if in.MinCPUPlatform != nil { + in, out := &in.MinCPUPlatform, &out.MinCPUPlatform + *out = new(string) + **out = **in + } + if in.NumInstances != nil { + in, out := &in.NumInstances, &out.NumInstances + *out = new(float64) + **out = **in + } + if in.Preemptibility != nil { + in, out := &in.Preemptibility, &out.Preemptibility + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigWorkerConfigParameters. 
+func (in *ConfigWorkerConfigParameters) DeepCopy() *ConfigWorkerConfigParameters { + if in == nil { + return nil + } + out := new(ConfigWorkerConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskConfigObservation) DeepCopyInto(out *DiskConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskConfigObservation. +func (in *DiskConfigObservation) DeepCopy() *DiskConfigObservation { + if in == nil { + return nil + } + out := new(DiskConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskConfigParameters) DeepCopyInto(out *DiskConfigParameters) { + *out = *in + if in.BootDiskSizeGb != nil { + in, out := &in.BootDiskSizeGb, &out.BootDiskSizeGb + *out = new(float64) + **out = **in + } + if in.BootDiskType != nil { + in, out := &in.BootDiskType, &out.BootDiskType + *out = new(string) + **out = **in + } + if in.NumLocalSsds != nil { + in, out := &in.NumLocalSsds, &out.NumLocalSsds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskConfigParameters. +func (in *DiskConfigParameters) DeepCopy() *DiskConfigParameters { + if in == nil { + return nil + } + out := new(DiskConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigObservation) DeepCopyInto(out *EncryptionConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigObservation. +func (in *EncryptionConfigObservation) DeepCopy() *EncryptionConfigObservation { + if in == nil { + return nil + } + out := new(EncryptionConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfigParameters) DeepCopyInto(out *EncryptionConfigParameters) { + *out = *in + if in.KMSKeyName != nil { + in, out := &in.KMSKeyName, &out.KMSKeyName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfigParameters. +func (in *EncryptionConfigParameters) DeepCopy() *EncryptionConfigParameters { + if in == nil { + return nil + } + out := new(EncryptionConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointConfigObservation) DeepCopyInto(out *EndpointConfigObservation) { + *out = *in + if in.HTTPPorts != nil { + in, out := &in.HTTPPorts, &out.HTTPPorts + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConfigObservation. 
+func (in *EndpointConfigObservation) DeepCopy() *EndpointConfigObservation { + if in == nil { + return nil + } + out := new(EndpointConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointConfigParameters) DeepCopyInto(out *EndpointConfigParameters) { + *out = *in + if in.EnableHTTPPortAccess != nil { + in, out := &in.EnableHTTPPortAccess, &out.EnableHTTPPortAccess + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConfigParameters. +func (in *EndpointConfigParameters) DeepCopy() *EndpointConfigParameters { + if in == nil { + return nil + } + out := new(EndpointConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GceClusterConfigObservation) DeepCopyInto(out *GceClusterConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GceClusterConfigObservation. +func (in *GceClusterConfigObservation) DeepCopy() *GceClusterConfigObservation { + if in == nil { + return nil + } + out := new(GceClusterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GceClusterConfigParameters) DeepCopyInto(out *GceClusterConfigParameters) { + *out = *in + if in.InternalIPOnly != nil { + in, out := &in.InternalIPOnly, &out.InternalIPOnly + *out = new(bool) + **out = **in + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(string) + **out = **in + } + if in.ServiceAccount != nil { + in, out := &in.ServiceAccount, &out.ServiceAccount + *out = new(string) + **out = **in + } + if in.ServiceAccountRef != nil { + in, out := &in.ServiceAccountRef, &out.ServiceAccountRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountScopes != nil { + in, out := &in.ServiceAccountScopes, &out.ServiceAccountScopes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ServiceAccountSelector != nil { + in, out := &in.ServiceAccountSelector, &out.ServiceAccountSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.ShieldedInstanceConfig != nil { + in, out := &in.ShieldedInstanceConfig, &out.ShieldedInstanceConfig + *out = make([]ShieldedInstanceConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Subnetwork != nil { + in, out := &in.Subnetwork, &out.Subnetwork + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } +} + +// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new GceClusterConfigParameters. +func (in *GceClusterConfigParameters) DeepCopy() *GceClusterConfigParameters { + if in == nil { + return nil + } + out := new(GceClusterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GceClusterConfigShieldedInstanceConfigObservation) DeepCopyInto(out *GceClusterConfigShieldedInstanceConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GceClusterConfigShieldedInstanceConfigObservation. +func (in *GceClusterConfigShieldedInstanceConfigObservation) DeepCopy() *GceClusterConfigShieldedInstanceConfigObservation { + if in == nil { + return nil + } + out := new(GceClusterConfigShieldedInstanceConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GceClusterConfigShieldedInstanceConfigParameters) DeepCopyInto(out *GceClusterConfigShieldedInstanceConfigParameters) { + *out = *in + if in.EnableIntegrityMonitoring != nil { + in, out := &in.EnableIntegrityMonitoring, &out.EnableIntegrityMonitoring + *out = new(bool) + **out = **in + } + if in.EnableSecureBoot != nil { + in, out := &in.EnableSecureBoot, &out.EnableSecureBoot + *out = new(bool) + **out = **in + } + if in.EnableVtpm != nil { + in, out := &in.EnableVtpm, &out.EnableVtpm + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GceClusterConfigShieldedInstanceConfigParameters. +func (in *GceClusterConfigShieldedInstanceConfigParameters) DeepCopy() *GceClusterConfigShieldedInstanceConfigParameters { + if in == nil { + return nil + } + out := new(GceClusterConfigShieldedInstanceConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GkeClusterConfigObservation) DeepCopyInto(out *GkeClusterConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GkeClusterConfigObservation. +func (in *GkeClusterConfigObservation) DeepCopy() *GkeClusterConfigObservation { + if in == nil { + return nil + } + out := new(GkeClusterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GkeClusterConfigParameters) DeepCopyInto(out *GkeClusterConfigParameters) { + *out = *in + if in.GkeClusterTarget != nil { + in, out := &in.GkeClusterTarget, &out.GkeClusterTarget + *out = new(string) + **out = **in + } + if in.NodePoolTarget != nil { + in, out := &in.NodePoolTarget, &out.NodePoolTarget + *out = make([]NodePoolTargetParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GkeClusterConfigParameters. +func (in *GkeClusterConfigParameters) DeepCopy() *GkeClusterConfigParameters { + if in == nil { + return nil + } + out := new(GkeClusterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HadoopConfigObservation) DeepCopyInto(out *HadoopConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopConfigObservation. +func (in *HadoopConfigObservation) DeepCopy() *HadoopConfigObservation { + if in == nil { + return nil + } + out := new(HadoopConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HadoopConfigParameters) DeepCopyInto(out *HadoopConfigParameters) { + *out = *in + if in.ArchiveUris != nil { + in, out := &in.ArchiveUris, &out.ArchiveUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FileUris != nil { + in, out := &in.FileUris, &out.FileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JarFileUris != nil { + in, out := &in.JarFileUris, &out.JarFileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = make([]LoggingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MainClass != nil { + in, out := &in.MainClass, &out.MainClass + *out = new(string) + **out = **in + } + if in.MainJarFileURI != nil { + in, out := &in.MainJarFileURI, &out.MainJarFileURI + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopConfigParameters. +func (in *HadoopConfigParameters) DeepCopy() *HadoopConfigParameters { + if in == nil { + return nil + } + out := new(HadoopConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HadoopJobLoggingConfigObservation) DeepCopyInto(out *HadoopJobLoggingConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopJobLoggingConfigObservation. +func (in *HadoopJobLoggingConfigObservation) DeepCopy() *HadoopJobLoggingConfigObservation { + if in == nil { + return nil + } + out := new(HadoopJobLoggingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HadoopJobLoggingConfigParameters) DeepCopyInto(out *HadoopJobLoggingConfigParameters) { + *out = *in + if in.DriverLogLevels != nil { + in, out := &in.DriverLogLevels, &out.DriverLogLevels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopJobLoggingConfigParameters. +func (in *HadoopJobLoggingConfigParameters) DeepCopy() *HadoopJobLoggingConfigParameters { + if in == nil { + return nil + } + out := new(HadoopJobLoggingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HadoopJobObservation) DeepCopyInto(out *HadoopJobObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopJobObservation. +func (in *HadoopJobObservation) DeepCopy() *HadoopJobObservation { + if in == nil { + return nil + } + out := new(HadoopJobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HadoopJobParameters) DeepCopyInto(out *HadoopJobParameters) { + *out = *in + if in.ArchiveUris != nil { + in, out := &in.ArchiveUris, &out.ArchiveUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FileUris != nil { + in, out := &in.FileUris, &out.FileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JarFileUris != nil { + in, out := &in.JarFileUris, &out.JarFileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = make([]HadoopJobLoggingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MainClass != nil { + in, out := &in.MainClass, &out.MainClass + *out = new(string) + **out = **in + } + if in.MainJarFileURI != nil { + in, out := &in.MainJarFileURI, &out.MainJarFileURI + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HadoopJobParameters. 
+func (in *HadoopJobParameters) DeepCopy() *HadoopJobParameters { + if in == nil { + return nil + } + out := new(HadoopJobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HiveConfigObservation) DeepCopyInto(out *HiveConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveConfigObservation. +func (in *HiveConfigObservation) DeepCopy() *HiveConfigObservation { + if in == nil { + return nil + } + out := new(HiveConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HiveConfigParameters) DeepCopyInto(out *HiveConfigParameters) { + *out = *in + if in.ContinueOnFailure != nil { + in, out := &in.ContinueOnFailure, &out.ContinueOnFailure + *out = new(bool) + **out = **in + } + if in.JarFileUris != nil { + in, out := &in.JarFileUris, &out.JarFileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.QueryFileURI != nil { + in, out := &in.QueryFileURI, &out.QueryFileURI + *out = new(string) + **out = **in + } + if in.QueryList != nil { + in, out := &in.QueryList, &out.QueryList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptVariables != nil { + in, out := &in.ScriptVariables, &out.ScriptVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveConfigParameters. +func (in *HiveConfigParameters) DeepCopy() *HiveConfigParameters { + if in == nil { + return nil + } + out := new(HiveConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HiveJobObservation) DeepCopyInto(out *HiveJobObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveJobObservation. +func (in *HiveJobObservation) DeepCopy() *HiveJobObservation { + if in == nil { + return nil + } + out := new(HiveJobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HiveJobParameters) DeepCopyInto(out *HiveJobParameters) { + *out = *in + if in.ContinueOnFailure != nil { + in, out := &in.ContinueOnFailure, &out.ContinueOnFailure + *out = new(bool) + **out = **in + } + if in.JarFileUris != nil { + in, out := &in.JarFileUris, &out.JarFileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.QueryFileURI != nil { + in, out := &in.QueryFileURI, &out.QueryFileURI + *out = new(string) + **out = **in + } + if in.QueryList != nil { + in, out := &in.QueryList, &out.QueryList + *out = make([]QueryListParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScriptVariables != nil { + in, out := &in.ScriptVariables, &out.ScriptVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HiveJobParameters. +func (in *HiveJobParameters) DeepCopy() *HiveJobParameters { + if in == nil { + return nil + } + out := new(HiveJobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitializationActionObservation) DeepCopyInto(out *InitializationActionObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializationActionObservation. +func (in *InitializationActionObservation) DeepCopy() *InitializationActionObservation { + if in == nil { + return nil + } + out := new(InitializationActionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitializationActionParameters) DeepCopyInto(out *InitializationActionParameters) { + *out = *in + if in.Script != nil { + in, out := &in.Script, &out.Script + *out = new(string) + **out = **in + } + if in.TimeoutSec != nil { + in, out := &in.TimeoutSec, &out.TimeoutSec + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializationActionParameters. +func (in *InitializationActionParameters) DeepCopy() *InitializationActionParameters { + if in == nil { + return nil + } + out := new(InitializationActionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitializationActionsObservation) DeepCopyInto(out *InitializationActionsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializationActionsObservation. 
+func (in *InitializationActionsObservation) DeepCopy() *InitializationActionsObservation { + if in == nil { + return nil + } + out := new(InitializationActionsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitializationActionsParameters) DeepCopyInto(out *InitializationActionsParameters) { + *out = *in + if in.ExecutableFile != nil { + in, out := &in.ExecutableFile, &out.ExecutableFile + *out = new(string) + **out = **in + } + if in.ExecutionTimeout != nil { + in, out := &in.ExecutionTimeout, &out.ExecutionTimeout + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializationActionsParameters. +func (in *InitializationActionsParameters) DeepCopy() *InitializationActionsParameters { + if in == nil { + return nil + } + out := new(InitializationActionsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Job) DeepCopyInto(out *Job) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Job. +func (in *Job) DeepCopy() *Job { + if in == nil { + return nil + } + out := new(Job) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Job) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobList) DeepCopyInto(out *JobList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobList. +func (in *JobList) DeepCopy() *JobList { + if in == nil { + return nil + } + out := new(JobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *JobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobObservation) DeepCopyInto(out *JobObservation) { + *out = *in + if in.DriverControlsFilesURI != nil { + in, out := &in.DriverControlsFilesURI, &out.DriverControlsFilesURI + *out = new(string) + **out = **in + } + if in.DriverOutputResourceURI != nil { + in, out := &in.DriverOutputResourceURI, &out.DriverOutputResourceURI + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = make([]PlacementObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = make([]StatusObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobObservation. +func (in *JobObservation) DeepCopy() *JobObservation { + if in == nil { + return nil + } + out := new(JobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobParameters) DeepCopyInto(out *JobParameters) { + *out = *in + if in.ForceDelete != nil { + in, out := &in.ForceDelete, &out.ForceDelete + *out = new(bool) + **out = **in + } + if in.HadoopConfig != nil { + in, out := &in.HadoopConfig, &out.HadoopConfig + *out = make([]HadoopConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HiveConfig != nil { + in, out := &in.HiveConfig, &out.HiveConfig + *out = make([]HiveConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PigConfig != nil { + in, out := &in.PigConfig, &out.PigConfig + *out = make([]PigConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = make([]PlacementParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrestoConfig != nil { + in, out := &in.PrestoConfig, &out.PrestoConfig + *out = make([]PrestoConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.PysparkConfig != nil { + in, out := &in.PysparkConfig, &out.PysparkConfig + *out = make([]PysparkConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Reference != nil { + in, out := &in.Reference, &out.Reference + *out = make([]ReferenceParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Region != nil { + in, out := &in.Region, &out.Region + *out = new(string) + **out = **in + } + if in.RegionRef != nil { + in, out := &in.RegionRef, &out.RegionRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.RegionSelector != nil { + in, out := &in.RegionSelector, &out.RegionSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Scheduling != nil { + in, out := 
&in.Scheduling, &out.Scheduling + *out = make([]SchedulingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SparkConfig != nil { + in, out := &in.SparkConfig, &out.SparkConfig + *out = make([]SparkConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SparksqlConfig != nil { + in, out := &in.SparksqlConfig, &out.SparksqlConfig + *out = make([]SparksqlConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobParameters. +func (in *JobParameters) DeepCopy() *JobParameters { + if in == nil { + return nil + } + out := new(JobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobSpec) DeepCopyInto(out *JobSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSpec. +func (in *JobSpec) DeepCopy() *JobSpec { + if in == nil { + return nil + } + out := new(JobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobStatus) DeepCopyInto(out *JobStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStatus. +func (in *JobStatus) DeepCopy() *JobStatus { + if in == nil { + return nil + } + out := new(JobStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobsObservation) DeepCopyInto(out *JobsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobsObservation. +func (in *JobsObservation) DeepCopy() *JobsObservation { + if in == nil { + return nil + } + out := new(JobsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobsParameters) DeepCopyInto(out *JobsParameters) { + *out = *in + if in.HadoopJob != nil { + in, out := &in.HadoopJob, &out.HadoopJob + *out = make([]HadoopJobParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HiveJob != nil { + in, out := &in.HiveJob, &out.HiveJob + *out = make([]HiveJobParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PigJob != nil { + in, out := &in.PigJob, &out.PigJob + *out = make([]PigJobParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrerequisiteStepIds != nil { + in, out := &in.PrerequisiteStepIds, &out.PrerequisiteStepIds + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.PrestoJob != nil { + in, out := &in.PrestoJob, &out.PrestoJob + *out = make([]PrestoJobParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PysparkJob != nil { + in, out := &in.PysparkJob, &out.PysparkJob + *out = make([]PysparkJobParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = make([]JobsSchedulingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SparkJob != nil { + in, out := &in.SparkJob, &out.SparkJob + *out = make([]SparkJobParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SparkRJob != nil { + in, out := &in.SparkRJob, &out.SparkRJob + *out = make([]SparkRJobParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SparkSQLJob != nil { + in, out := &in.SparkSQLJob, &out.SparkSQLJob + *out = make([]SparkSQLJobParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StepID != nil { + in, out := &in.StepID, &out.StepID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobsParameters. +func (in *JobsParameters) DeepCopy() *JobsParameters { + if in == nil { + return nil + } + out := new(JobsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobsSchedulingObservation) DeepCopyInto(out *JobsSchedulingObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobsSchedulingObservation. +func (in *JobsSchedulingObservation) DeepCopy() *JobsSchedulingObservation { + if in == nil { + return nil + } + out := new(JobsSchedulingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobsSchedulingParameters) DeepCopyInto(out *JobsSchedulingParameters) { + *out = *in + if in.MaxFailuresPerHour != nil { + in, out := &in.MaxFailuresPerHour, &out.MaxFailuresPerHour + *out = new(float64) + **out = **in + } + if in.MaxFailuresTotal != nil { + in, out := &in.MaxFailuresTotal, &out.MaxFailuresTotal + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobsSchedulingParameters. +func (in *JobsSchedulingParameters) DeepCopy() *JobsSchedulingParameters { + if in == nil { + return nil + } + out := new(JobsSchedulingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KerberosConfigObservation) DeepCopyInto(out *KerberosConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KerberosConfigObservation. +func (in *KerberosConfigObservation) DeepCopy() *KerberosConfigObservation { + if in == nil { + return nil + } + out := new(KerberosConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KerberosConfigParameters) DeepCopyInto(out *KerberosConfigParameters) { + *out = *in + if in.CrossRealmTrustAdminServer != nil { + in, out := &in.CrossRealmTrustAdminServer, &out.CrossRealmTrustAdminServer + *out = new(string) + **out = **in + } + if in.CrossRealmTrustKdc != nil { + in, out := &in.CrossRealmTrustKdc, &out.CrossRealmTrustKdc + *out = new(string) + **out = **in + } + if in.CrossRealmTrustRealm != nil { + in, out := &in.CrossRealmTrustRealm, &out.CrossRealmTrustRealm + *out = new(string) + **out = **in + } + if in.CrossRealmTrustSharedPasswordURI != nil { + in, out := &in.CrossRealmTrustSharedPasswordURI, &out.CrossRealmTrustSharedPasswordURI + *out = new(string) + **out = **in + } + if in.EnableKerberos != nil { + in, out := &in.EnableKerberos, &out.EnableKerberos + *out = new(bool) + **out = **in + } + if in.KMSKeyURI != nil { + in, out := &in.KMSKeyURI, &out.KMSKeyURI + *out = new(string) + **out = **in + } + if in.KdcDBKeyURI != nil { + in, out := &in.KdcDBKeyURI, &out.KdcDBKeyURI + *out = new(string) + **out = **in + } + if in.KeyPasswordURI != nil { + in, out := &in.KeyPasswordURI, &out.KeyPasswordURI + *out = new(string) + **out = **in + } + if in.KeystorePasswordURI != nil { + in, out := &in.KeystorePasswordURI, &out.KeystorePasswordURI + *out = new(string) + **out = **in + } + if in.KeystoreURI != nil { + in, out := &in.KeystoreURI, &out.KeystoreURI + *out = new(string) + **out = **in + } + if in.Realm != nil { + in, out := &in.Realm, &out.Realm + *out = new(string) + **out = **in + } + if in.RootPrincipalPasswordURI != nil { + in, out := &in.RootPrincipalPasswordURI, &out.RootPrincipalPasswordURI + *out = new(string) + **out = **in + } + if in.TgtLifetimeHours != nil { + in, out := &in.TgtLifetimeHours, &out.TgtLifetimeHours + *out = new(float64) + **out = **in + } + if in.TruststorePasswordURI != nil { + in, out := &in.TruststorePasswordURI, &out.TruststorePasswordURI + *out = new(string) + **out = **in + } + if in.TruststoreURI != nil { + in, out := &in.TruststoreURI, &out.TruststoreURI + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KerberosConfigParameters. 
+func (in *KerberosConfigParameters) DeepCopy() *KerberosConfigParameters { + if in == nil { + return nil + } + out := new(KerberosConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterConfigObservation) DeepCopyInto(out *KubernetesClusterConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterConfigObservation. +func (in *KubernetesClusterConfigObservation) DeepCopy() *KubernetesClusterConfigObservation { + if in == nil { + return nil + } + out := new(KubernetesClusterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesClusterConfigParameters) DeepCopyInto(out *KubernetesClusterConfigParameters) { + *out = *in + if in.GkeClusterConfig != nil { + in, out := &in.GkeClusterConfig, &out.GkeClusterConfig + *out = make([]GkeClusterConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KubernetesNamespace != nil { + in, out := &in.KubernetesNamespace, &out.KubernetesNamespace + *out = new(string) + **out = **in + } + if in.KubernetesSoftwareConfig != nil { + in, out := &in.KubernetesSoftwareConfig, &out.KubernetesSoftwareConfig + *out = make([]KubernetesSoftwareConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesClusterConfigParameters. +func (in *KubernetesClusterConfigParameters) DeepCopy() *KubernetesClusterConfigParameters { + if in == nil { + return nil + } + out := new(KubernetesClusterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesSoftwareConfigObservation) DeepCopyInto(out *KubernetesSoftwareConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSoftwareConfigObservation. +func (in *KubernetesSoftwareConfigObservation) DeepCopy() *KubernetesSoftwareConfigObservation { + if in == nil { + return nil + } + out := new(KubernetesSoftwareConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesSoftwareConfigParameters) DeepCopyInto(out *KubernetesSoftwareConfigParameters) { + *out = *in + if in.ComponentVersion != nil { + in, out := &in.ComponentVersion, &out.ComponentVersion + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSoftwareConfigParameters. 
+func (in *KubernetesSoftwareConfigParameters) DeepCopy() *KubernetesSoftwareConfigParameters { + if in == nil { + return nil + } + out := new(KubernetesSoftwareConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecycleConfigObservation) DeepCopyInto(out *LifecycleConfigObservation) { + *out = *in + if in.IdleStartTime != nil { + in, out := &in.IdleStartTime, &out.IdleStartTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleConfigObservation. +func (in *LifecycleConfigObservation) DeepCopy() *LifecycleConfigObservation { + if in == nil { + return nil + } + out := new(LifecycleConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecycleConfigParameters) DeepCopyInto(out *LifecycleConfigParameters) { + *out = *in + if in.AutoDeleteTime != nil { + in, out := &in.AutoDeleteTime, &out.AutoDeleteTime + *out = new(string) + **out = **in + } + if in.IdleDeleteTTL != nil { + in, out := &in.IdleDeleteTTL, &out.IdleDeleteTTL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleConfigParameters. +func (in *LifecycleConfigParameters) DeepCopy() *LifecycleConfigParameters { + if in == nil { + return nil + } + out := new(LifecycleConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigObservation) DeepCopyInto(out *LoggingConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigObservation. +func (in *LoggingConfigObservation) DeepCopy() *LoggingConfigObservation { + if in == nil { + return nil + } + out := new(LoggingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfigParameters) DeepCopyInto(out *LoggingConfigParameters) { + *out = *in + if in.DriverLogLevels != nil { + in, out := &in.DriverLogLevels, &out.DriverLogLevels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfigParameters. +func (in *LoggingConfigParameters) DeepCopy() *LoggingConfigParameters { + if in == nil { + return nil + } + out := new(LoggingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedClusterConfigObservation) DeepCopyInto(out *ManagedClusterConfigObservation) { + *out = *in + if in.EndpointConfig != nil { + in, out := &in.EndpointConfig, &out.EndpointConfig + *out = make([]ConfigEndpointConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LifecycleConfig != nil { + in, out := &in.LifecycleConfig, &out.LifecycleConfig + *out = make([]ConfigLifecycleConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MasterConfig != nil { + in, out := &in.MasterConfig, &out.MasterConfig + *out = make([]ConfigMasterConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecondaryWorkerConfig != nil { + in, out := &in.SecondaryWorkerConfig, &out.SecondaryWorkerConfig + *out = make([]ConfigSecondaryWorkerConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.WorkerConfig != nil { + in, out := &in.WorkerConfig, &out.WorkerConfig + *out = make([]ConfigWorkerConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterConfigObservation. +func (in *ManagedClusterConfigObservation) DeepCopy() *ManagedClusterConfigObservation { + if in == nil { + return nil + } + out := new(ManagedClusterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterConfigParameters) DeepCopyInto(out *ManagedClusterConfigParameters) { + *out = *in + if in.AutoscalingConfig != nil { + in, out := &in.AutoscalingConfig, &out.AutoscalingConfig + *out = make([]ConfigAutoscalingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EncryptionConfig != nil { + in, out := &in.EncryptionConfig, &out.EncryptionConfig + *out = make([]ConfigEncryptionConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EndpointConfig != nil { + in, out := &in.EndpointConfig, &out.EndpointConfig + *out = make([]ConfigEndpointConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.GceClusterConfig != nil { + in, out := &in.GceClusterConfig, &out.GceClusterConfig + *out = make([]ConfigGceClusterConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitializationActions != nil { + in, out := &in.InitializationActions, &out.InitializationActions + *out = make([]InitializationActionsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LifecycleConfig != nil { + in, out := &in.LifecycleConfig, &out.LifecycleConfig + *out = make([]ConfigLifecycleConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MasterConfig != nil { + in, out := &in.MasterConfig, &out.MasterConfig + *out = make([]ConfigMasterConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecondaryWorkerConfig != nil { + in, out := &in.SecondaryWorkerConfig, &out.SecondaryWorkerConfig + *out = make([]ConfigSecondaryWorkerConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityConfig != nil { + in, out := 
&in.SecurityConfig, &out.SecurityConfig + *out = make([]ConfigSecurityConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SoftwareConfig != nil { + in, out := &in.SoftwareConfig, &out.SoftwareConfig + *out = make([]ConfigSoftwareConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StagingBucket != nil { + in, out := &in.StagingBucket, &out.StagingBucket + *out = new(string) + **out = **in + } + if in.TempBucket != nil { + in, out := &in.TempBucket, &out.TempBucket + *out = new(string) + **out = **in + } + if in.WorkerConfig != nil { + in, out := &in.WorkerConfig, &out.WorkerConfig + *out = make([]ConfigWorkerConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterConfigParameters. +func (in *ManagedClusterConfigParameters) DeepCopy() *ManagedClusterConfigParameters { + if in == nil { + return nil + } + out := new(ManagedClusterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterObservation) DeepCopyInto(out *ManagedClusterObservation) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]ManagedClusterConfigObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterObservation. +func (in *ManagedClusterObservation) DeepCopy() *ManagedClusterObservation { + if in == nil { + return nil + } + out := new(ManagedClusterObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterParameters) DeepCopyInto(out *ManagedClusterParameters) { + *out = *in + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]ManagedClusterConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterParameters. +func (in *ManagedClusterParameters) DeepCopy() *ManagedClusterParameters { + if in == nil { + return nil + } + out := new(ManagedClusterParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ManagedGroupConfigObservation) DeepCopyInto(out *ManagedGroupConfigObservation) { + *out = *in + if in.InstanceGroupManagerName != nil { + in, out := &in.InstanceGroupManagerName, &out.InstanceGroupManagerName + *out = new(string) + **out = **in + } + if in.InstanceTemplateName != nil { + in, out := &in.InstanceTemplateName, &out.InstanceTemplateName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedGroupConfigObservation. +func (in *ManagedGroupConfigObservation) DeepCopy() *ManagedGroupConfigObservation { + if in == nil { + return nil + } + out := new(ManagedGroupConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedGroupConfigParameters) DeepCopyInto(out *ManagedGroupConfigParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedGroupConfigParameters. +func (in *ManagedGroupConfigParameters) DeepCopy() *ManagedGroupConfigParameters { + if in == nil { + return nil + } + out := new(ManagedGroupConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterConfigAcceleratorsObservation) DeepCopyInto(out *MasterConfigAcceleratorsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterConfigAcceleratorsObservation. +func (in *MasterConfigAcceleratorsObservation) DeepCopy() *MasterConfigAcceleratorsObservation { + if in == nil { + return nil + } + out := new(MasterConfigAcceleratorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterConfigAcceleratorsParameters) DeepCopyInto(out *MasterConfigAcceleratorsParameters) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(float64) + **out = **in + } + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterConfigAcceleratorsParameters. +func (in *MasterConfigAcceleratorsParameters) DeepCopy() *MasterConfigAcceleratorsParameters { + if in == nil { + return nil + } + out := new(MasterConfigAcceleratorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterConfigDiskConfigObservation) DeepCopyInto(out *MasterConfigDiskConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterConfigDiskConfigObservation. +func (in *MasterConfigDiskConfigObservation) DeepCopy() *MasterConfigDiskConfigObservation { + if in == nil { + return nil + } + out := new(MasterConfigDiskConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MasterConfigDiskConfigParameters) DeepCopyInto(out *MasterConfigDiskConfigParameters) { + *out = *in + if in.BootDiskSizeGb != nil { + in, out := &in.BootDiskSizeGb, &out.BootDiskSizeGb + *out = new(float64) + **out = **in + } + if in.BootDiskType != nil { + in, out := &in.BootDiskType, &out.BootDiskType + *out = new(string) + **out = **in + } + if in.NumLocalSsds != nil { + in, out := &in.NumLocalSsds, &out.NumLocalSsds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterConfigDiskConfigParameters. +func (in *MasterConfigDiskConfigParameters) DeepCopy() *MasterConfigDiskConfigParameters { + if in == nil { + return nil + } + out := new(MasterConfigDiskConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterConfigObservation) DeepCopyInto(out *MasterConfigObservation) { + *out = *in + if in.InstanceNames != nil { + in, out := &in.InstanceNames, &out.InstanceNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterConfigObservation. +func (in *MasterConfigObservation) DeepCopy() *MasterConfigObservation { + if in == nil { + return nil + } + out := new(MasterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterConfigParameters) DeepCopyInto(out *MasterConfigParameters) { + *out = *in + if in.Accelerators != nil { + in, out := &in.Accelerators, &out.Accelerators + *out = make([]AcceleratorsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DiskConfig != nil { + in, out := &in.DiskConfig, &out.DiskConfig + *out = make([]DiskConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ImageURI != nil { + in, out := &in.ImageURI, &out.ImageURI + *out = new(string) + **out = **in + } + if in.MachineType != nil { + in, out := &in.MachineType, &out.MachineType + *out = new(string) + **out = **in + } + if in.MinCPUPlatform != nil { + in, out := &in.MinCPUPlatform, &out.MinCPUPlatform + *out = new(string) + **out = **in + } + if in.NumInstances != nil { + in, out := &in.NumInstances, &out.NumInstances + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterConfigParameters. +func (in *MasterConfigParameters) DeepCopy() *MasterConfigParameters { + if in == nil { + return nil + } + out := new(MasterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetastoreConfigObservation) DeepCopyInto(out *MetastoreConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetastoreConfigObservation. 
+func (in *MetastoreConfigObservation) DeepCopy() *MetastoreConfigObservation { + if in == nil { + return nil + } + out := new(MetastoreConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetastoreConfigParameters) DeepCopyInto(out *MetastoreConfigParameters) { + *out = *in + if in.DataprocMetastoreService != nil { + in, out := &in.DataprocMetastoreService, &out.DataprocMetastoreService + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetastoreConfigParameters. +func (in *MetastoreConfigParameters) DeepCopy() *MetastoreConfigParameters { + if in == nil { + return nil + } + out := new(MetastoreConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroupAffinityObservation) DeepCopyInto(out *NodeGroupAffinityObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupAffinityObservation. +func (in *NodeGroupAffinityObservation) DeepCopy() *NodeGroupAffinityObservation { + if in == nil { + return nil + } + out := new(NodeGroupAffinityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeGroupAffinityParameters) DeepCopyInto(out *NodeGroupAffinityParameters) { + *out = *in + if in.NodeGroup != nil { + in, out := &in.NodeGroup, &out.NodeGroup + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeGroupAffinityParameters. +func (in *NodeGroupAffinityParameters) DeepCopy() *NodeGroupAffinityParameters { + if in == nil { + return nil + } + out := new(NodeGroupAffinityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolConfigObservation) DeepCopyInto(out *NodePoolConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolConfigObservation. +func (in *NodePoolConfigObservation) DeepCopy() *NodePoolConfigObservation { + if in == nil { + return nil + } + out := new(NodePoolConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolConfigParameters) DeepCopyInto(out *NodePoolConfigParameters) { + *out = *in + if in.Autoscaling != nil { + in, out := &in.Autoscaling, &out.Autoscaling + *out = make([]AutoscalingParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]ConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Locations != nil { + in, out := &in.Locations, &out.Locations + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolConfigParameters. 
+func (in *NodePoolConfigParameters) DeepCopy() *NodePoolConfigParameters { + if in == nil { + return nil + } + out := new(NodePoolConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolTargetObservation) DeepCopyInto(out *NodePoolTargetObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolTargetObservation. +func (in *NodePoolTargetObservation) DeepCopy() *NodePoolTargetObservation { + if in == nil { + return nil + } + out := new(NodePoolTargetObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolTargetParameters) DeepCopyInto(out *NodePoolTargetParameters) { + *out = *in + if in.NodePool != nil { + in, out := &in.NodePool, &out.NodePool + *out = new(string) + **out = **in + } + if in.NodePoolConfig != nil { + in, out := &in.NodePoolConfig, &out.NodePoolConfig + *out = make([]NodePoolConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolTargetParameters. +func (in *NodePoolTargetParameters) DeepCopy() *NodePoolTargetParameters { + if in == nil { + return nil + } + out := new(NodePoolTargetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParametersObservation) DeepCopyInto(out *ParametersObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersObservation. +func (in *ParametersObservation) DeepCopy() *ParametersObservation { + if in == nil { + return nil + } + out := new(ParametersObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParametersParameters) DeepCopyInto(out *ParametersParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = make([]ValidationParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParametersParameters. +func (in *ParametersParameters) DeepCopy() *ParametersParameters { + if in == nil { + return nil + } + out := new(ParametersParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PigConfigLoggingConfigObservation) DeepCopyInto(out *PigConfigLoggingConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PigConfigLoggingConfigObservation. +func (in *PigConfigLoggingConfigObservation) DeepCopy() *PigConfigLoggingConfigObservation { + if in == nil { + return nil + } + out := new(PigConfigLoggingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PigConfigLoggingConfigParameters) DeepCopyInto(out *PigConfigLoggingConfigParameters) { + *out = *in + if in.DriverLogLevels != nil { + in, out := &in.DriverLogLevels, &out.DriverLogLevels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PigConfigLoggingConfigParameters. +func (in *PigConfigLoggingConfigParameters) DeepCopy() *PigConfigLoggingConfigParameters { + if in == nil { + return nil + } + out := new(PigConfigLoggingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PigConfigObservation) DeepCopyInto(out *PigConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PigConfigObservation. +func (in *PigConfigObservation) DeepCopy() *PigConfigObservation { + if in == nil { + return nil + } + out := new(PigConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PigConfigParameters) DeepCopyInto(out *PigConfigParameters) { + *out = *in + if in.ContinueOnFailure != nil { + in, out := &in.ContinueOnFailure, &out.ContinueOnFailure + *out = new(bool) + **out = **in + } + if in.JarFileUris != nil { + in, out := &in.JarFileUris, &out.JarFileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = make([]PigConfigLoggingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.QueryFileURI != nil { + in, out := &in.QueryFileURI, &out.QueryFileURI + *out = new(string) + **out = **in + } + if in.QueryList != nil { + in, out := &in.QueryList, &out.QueryList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptVariables != nil { + in, out := &in.ScriptVariables, &out.ScriptVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PigConfigParameters. +func (in *PigConfigParameters) DeepCopy() *PigConfigParameters { + if in == nil { + return nil + } + out := new(PigConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PigJobLoggingConfigObservation) DeepCopyInto(out *PigJobLoggingConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PigJobLoggingConfigObservation. +func (in *PigJobLoggingConfigObservation) DeepCopy() *PigJobLoggingConfigObservation { + if in == nil { + return nil + } + out := new(PigJobLoggingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PigJobLoggingConfigParameters) DeepCopyInto(out *PigJobLoggingConfigParameters) { + *out = *in + if in.DriverLogLevels != nil { + in, out := &in.DriverLogLevels, &out.DriverLogLevels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PigJobLoggingConfigParameters. +func (in *PigJobLoggingConfigParameters) DeepCopy() *PigJobLoggingConfigParameters { + if in == nil { + return nil + } + out := new(PigJobLoggingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PigJobObservation) DeepCopyInto(out *PigJobObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PigJobObservation. +func (in *PigJobObservation) DeepCopy() *PigJobObservation { + if in == nil { + return nil + } + out := new(PigJobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PigJobParameters) DeepCopyInto(out *PigJobParameters) { + *out = *in + if in.ContinueOnFailure != nil { + in, out := &in.ContinueOnFailure, &out.ContinueOnFailure + *out = new(bool) + **out = **in + } + if in.JarFileUris != nil { + in, out := &in.JarFileUris, &out.JarFileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = make([]PigJobLoggingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.QueryFileURI != nil { + in, out := &in.QueryFileURI, &out.QueryFileURI + *out = new(string) + **out = **in + } + if in.QueryList != nil { + in, out := &in.QueryList, &out.QueryList + *out = make([]PigJobQueryListParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScriptVariables != nil { + in, out := &in.ScriptVariables, &out.ScriptVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PigJobParameters. +func (in *PigJobParameters) DeepCopy() *PigJobParameters { + if in == nil { + return nil + } + out := new(PigJobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PigJobQueryListObservation) DeepCopyInto(out *PigJobQueryListObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PigJobQueryListObservation. +func (in *PigJobQueryListObservation) DeepCopy() *PigJobQueryListObservation { + if in == nil { + return nil + } + out := new(PigJobQueryListObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PigJobQueryListParameters) DeepCopyInto(out *PigJobQueryListParameters) { + *out = *in + if in.Queries != nil { + in, out := &in.Queries, &out.Queries + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PigJobQueryListParameters. 
+func (in *PigJobQueryListParameters) DeepCopy() *PigJobQueryListParameters { + if in == nil { + return nil + } + out := new(PigJobQueryListParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementObservation) DeepCopyInto(out *PlacementObservation) { + *out = *in + if in.ClusterUUID != nil { + in, out := &in.ClusterUUID, &out.ClusterUUID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementObservation. +func (in *PlacementObservation) DeepCopy() *PlacementObservation { + if in == nil { + return nil + } + out := new(PlacementObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementParameters) DeepCopyInto(out *PlacementParameters) { + *out = *in + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.ClusterNameRef != nil { + in, out := &in.ClusterNameRef, &out.ClusterNameRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ClusterNameSelector != nil { + in, out := &in.ClusterNameSelector, &out.ClusterNameSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementParameters. +func (in *PlacementParameters) DeepCopy() *PlacementParameters { + if in == nil { + return nil + } + out := new(PlacementParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreemptibleWorkerConfigDiskConfigObservation) DeepCopyInto(out *PreemptibleWorkerConfigDiskConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreemptibleWorkerConfigDiskConfigObservation. +func (in *PreemptibleWorkerConfigDiskConfigObservation) DeepCopy() *PreemptibleWorkerConfigDiskConfigObservation { + if in == nil { + return nil + } + out := new(PreemptibleWorkerConfigDiskConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreemptibleWorkerConfigDiskConfigParameters) DeepCopyInto(out *PreemptibleWorkerConfigDiskConfigParameters) { + *out = *in + if in.BootDiskSizeGb != nil { + in, out := &in.BootDiskSizeGb, &out.BootDiskSizeGb + *out = new(float64) + **out = **in + } + if in.BootDiskType != nil { + in, out := &in.BootDiskType, &out.BootDiskType + *out = new(string) + **out = **in + } + if in.NumLocalSsds != nil { + in, out := &in.NumLocalSsds, &out.NumLocalSsds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreemptibleWorkerConfigDiskConfigParameters. +func (in *PreemptibleWorkerConfigDiskConfigParameters) DeepCopy() *PreemptibleWorkerConfigDiskConfigParameters { + if in == nil { + return nil + } + out := new(PreemptibleWorkerConfigDiskConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PreemptibleWorkerConfigObservation) DeepCopyInto(out *PreemptibleWorkerConfigObservation) { + *out = *in + if in.InstanceNames != nil { + in, out := &in.InstanceNames, &out.InstanceNames + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreemptibleWorkerConfigObservation. +func (in *PreemptibleWorkerConfigObservation) DeepCopy() *PreemptibleWorkerConfigObservation { + if in == nil { + return nil + } + out := new(PreemptibleWorkerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreemptibleWorkerConfigParameters) DeepCopyInto(out *PreemptibleWorkerConfigParameters) { + *out = *in + if in.DiskConfig != nil { + in, out := &in.DiskConfig, &out.DiskConfig + *out = make([]PreemptibleWorkerConfigDiskConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NumInstances != nil { + in, out := &in.NumInstances, &out.NumInstances + *out = new(float64) + **out = **in + } + if in.Preemptibility != nil { + in, out := &in.Preemptibility, &out.Preemptibility + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreemptibleWorkerConfigParameters. +func (in *PreemptibleWorkerConfigParameters) DeepCopy() *PreemptibleWorkerConfigParameters { + if in == nil { + return nil + } + out := new(PreemptibleWorkerConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrestoConfigLoggingConfigObservation) DeepCopyInto(out *PrestoConfigLoggingConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrestoConfigLoggingConfigObservation. +func (in *PrestoConfigLoggingConfigObservation) DeepCopy() *PrestoConfigLoggingConfigObservation { + if in == nil { + return nil + } + out := new(PrestoConfigLoggingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrestoConfigLoggingConfigParameters) DeepCopyInto(out *PrestoConfigLoggingConfigParameters) { + *out = *in + if in.DriverLogLevels != nil { + in, out := &in.DriverLogLevels, &out.DriverLogLevels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrestoConfigLoggingConfigParameters. +func (in *PrestoConfigLoggingConfigParameters) DeepCopy() *PrestoConfigLoggingConfigParameters { + if in == nil { + return nil + } + out := new(PrestoConfigLoggingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrestoConfigObservation) DeepCopyInto(out *PrestoConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrestoConfigObservation. +func (in *PrestoConfigObservation) DeepCopy() *PrestoConfigObservation { + if in == nil { + return nil + } + out := new(PrestoConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrestoConfigParameters) DeepCopyInto(out *PrestoConfigParameters) { + *out = *in + if in.ClientTags != nil { + in, out := &in.ClientTags, &out.ClientTags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContinueOnFailure != nil { + in, out := &in.ContinueOnFailure, &out.ContinueOnFailure + *out = new(bool) + **out = **in + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = make([]PrestoConfigLoggingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OutputFormat != nil { + in, out := &in.OutputFormat, &out.OutputFormat + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.QueryFileURI != nil { + in, out := &in.QueryFileURI, &out.QueryFileURI + *out = new(string) + **out = **in + } + if in.QueryList != nil { + in, out := &in.QueryList, &out.QueryList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrestoConfigParameters. +func (in *PrestoConfigParameters) DeepCopy() *PrestoConfigParameters { + if in == nil { + return nil + } + out := new(PrestoConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrestoJobLoggingConfigObservation) DeepCopyInto(out *PrestoJobLoggingConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrestoJobLoggingConfigObservation. +func (in *PrestoJobLoggingConfigObservation) DeepCopy() *PrestoJobLoggingConfigObservation { + if in == nil { + return nil + } + out := new(PrestoJobLoggingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrestoJobLoggingConfigParameters) DeepCopyInto(out *PrestoJobLoggingConfigParameters) { + *out = *in + if in.DriverLogLevels != nil { + in, out := &in.DriverLogLevels, &out.DriverLogLevels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrestoJobLoggingConfigParameters. 
+func (in *PrestoJobLoggingConfigParameters) DeepCopy() *PrestoJobLoggingConfigParameters { + if in == nil { + return nil + } + out := new(PrestoJobLoggingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrestoJobObservation) DeepCopyInto(out *PrestoJobObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrestoJobObservation. +func (in *PrestoJobObservation) DeepCopy() *PrestoJobObservation { + if in == nil { + return nil + } + out := new(PrestoJobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrestoJobParameters) DeepCopyInto(out *PrestoJobParameters) { + *out = *in + if in.ClientTags != nil { + in, out := &in.ClientTags, &out.ClientTags + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ContinueOnFailure != nil { + in, out := &in.ContinueOnFailure, &out.ContinueOnFailure + *out = new(bool) + **out = **in + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = make([]PrestoJobLoggingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OutputFormat != nil { + in, out := &in.OutputFormat, &out.OutputFormat + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.QueryFileURI != nil { + in, out := &in.QueryFileURI, &out.QueryFileURI + *out = new(string) + **out = **in + } + if in.QueryList != nil { + in, out := &in.QueryList, &out.QueryList + *out = make([]PrestoJobQueryListParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrestoJobParameters. +func (in *PrestoJobParameters) DeepCopy() *PrestoJobParameters { + if in == nil { + return nil + } + out := new(PrestoJobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrestoJobQueryListObservation) DeepCopyInto(out *PrestoJobQueryListObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrestoJobQueryListObservation. +func (in *PrestoJobQueryListObservation) DeepCopy() *PrestoJobQueryListObservation { + if in == nil { + return nil + } + out := new(PrestoJobQueryListObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrestoJobQueryListParameters) DeepCopyInto(out *PrestoJobQueryListParameters) { + *out = *in + if in.Queries != nil { + in, out := &in.Queries, &out.Queries + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrestoJobQueryListParameters. +func (in *PrestoJobQueryListParameters) DeepCopy() *PrestoJobQueryListParameters { + if in == nil { + return nil + } + out := new(PrestoJobQueryListParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PysparkConfigLoggingConfigObservation) DeepCopyInto(out *PysparkConfigLoggingConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PysparkConfigLoggingConfigObservation. +func (in *PysparkConfigLoggingConfigObservation) DeepCopy() *PysparkConfigLoggingConfigObservation { + if in == nil { + return nil + } + out := new(PysparkConfigLoggingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PysparkConfigLoggingConfigParameters) DeepCopyInto(out *PysparkConfigLoggingConfigParameters) { + *out = *in + if in.DriverLogLevels != nil { + in, out := &in.DriverLogLevels, &out.DriverLogLevels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PysparkConfigLoggingConfigParameters. +func (in *PysparkConfigLoggingConfigParameters) DeepCopy() *PysparkConfigLoggingConfigParameters { + if in == nil { + return nil + } + out := new(PysparkConfigLoggingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PysparkConfigObservation) DeepCopyInto(out *PysparkConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PysparkConfigObservation. +func (in *PysparkConfigObservation) DeepCopy() *PysparkConfigObservation { + if in == nil { + return nil + } + out := new(PysparkConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PysparkConfigParameters) DeepCopyInto(out *PysparkConfigParameters) { + *out = *in + if in.ArchiveUris != nil { + in, out := &in.ArchiveUris, &out.ArchiveUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FileUris != nil { + in, out := &in.FileUris, &out.FileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JarFileUris != nil { + in, out := &in.JarFileUris, &out.JarFileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = make([]PysparkConfigLoggingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MainPythonFileURI != nil { + in, out := &in.MainPythonFileURI, &out.MainPythonFileURI + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PythonFileUris != nil { + in, out := &in.PythonFileUris, &out.PythonFileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PysparkConfigParameters. +func (in *PysparkConfigParameters) DeepCopy() *PysparkConfigParameters { + if in == nil { + return nil + } + out := new(PysparkConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PysparkJobLoggingConfigObservation) DeepCopyInto(out *PysparkJobLoggingConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PysparkJobLoggingConfigObservation. +func (in *PysparkJobLoggingConfigObservation) DeepCopy() *PysparkJobLoggingConfigObservation { + if in == nil { + return nil + } + out := new(PysparkJobLoggingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PysparkJobLoggingConfigParameters) DeepCopyInto(out *PysparkJobLoggingConfigParameters) { + *out = *in + if in.DriverLogLevels != nil { + in, out := &in.DriverLogLevels, &out.DriverLogLevels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PysparkJobLoggingConfigParameters. 
+func (in *PysparkJobLoggingConfigParameters) DeepCopy() *PysparkJobLoggingConfigParameters { + if in == nil { + return nil + } + out := new(PysparkJobLoggingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PysparkJobObservation) DeepCopyInto(out *PysparkJobObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PysparkJobObservation. +func (in *PysparkJobObservation) DeepCopy() *PysparkJobObservation { + if in == nil { + return nil + } + out := new(PysparkJobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PysparkJobParameters) DeepCopyInto(out *PysparkJobParameters) { + *out = *in + if in.ArchiveUris != nil { + in, out := &in.ArchiveUris, &out.ArchiveUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FileUris != nil { + in, out := &in.FileUris, &out.FileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JarFileUris != nil { + in, out := &in.JarFileUris, &out.JarFileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = make([]PysparkJobLoggingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MainPythonFileURI != nil { + in, out := &in.MainPythonFileURI, &out.MainPythonFileURI + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.PythonFileUris != nil { + in, out := &in.PythonFileUris, &out.PythonFileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PysparkJobParameters. +func (in *PysparkJobParameters) DeepCopy() *PysparkJobParameters { + if in == nil { + return nil + } + out := new(PysparkJobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryListObservation) DeepCopyInto(out *QueryListObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryListObservation. 
+func (in *QueryListObservation) DeepCopy() *QueryListObservation { + if in == nil { + return nil + } + out := new(QueryListObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueryListParameters) DeepCopyInto(out *QueryListParameters) { + *out = *in + if in.Queries != nil { + in, out := &in.Queries, &out.Queries + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueryListParameters. +func (in *QueryListParameters) DeepCopy() *QueryListParameters { + if in == nil { + return nil + } + out := new(QueryListParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceObservation) DeepCopyInto(out *ReferenceObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceObservation. +func (in *ReferenceObservation) DeepCopy() *ReferenceObservation { + if in == nil { + return nil + } + out := new(ReferenceObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceParameters) DeepCopyInto(out *ReferenceParameters) { + *out = *in + if in.JobID != nil { + in, out := &in.JobID, &out.JobID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceParameters. +func (in *ReferenceParameters) DeepCopy() *ReferenceParameters { + if in == nil { + return nil + } + out := new(ReferenceParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexObservation) DeepCopyInto(out *RegexObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexObservation. +func (in *RegexObservation) DeepCopy() *RegexObservation { + if in == nil { + return nil + } + out := new(RegexObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegexParameters) DeepCopyInto(out *RegexParameters) { + *out = *in + if in.Regexes != nil { + in, out := &in.Regexes, &out.Regexes + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegexParameters. +func (in *RegexParameters) DeepCopy() *RegexParameters { + if in == nil { + return nil + } + out := new(RegexParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReservationAffinityObservation) DeepCopyInto(out *ReservationAffinityObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReservationAffinityObservation. 
+func (in *ReservationAffinityObservation) DeepCopy() *ReservationAffinityObservation { + if in == nil { + return nil + } + out := new(ReservationAffinityObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReservationAffinityParameters) DeepCopyInto(out *ReservationAffinityParameters) { + *out = *in + if in.ConsumeReservationType != nil { + in, out := &in.ConsumeReservationType, &out.ConsumeReservationType + *out = new(string) + **out = **in + } + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReservationAffinityParameters. +func (in *ReservationAffinityParameters) DeepCopy() *ReservationAffinityParameters { + if in == nil { + return nil + } + out := new(ReservationAffinityParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingObservation) DeepCopyInto(out *SchedulingObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingObservation. +func (in *SchedulingObservation) DeepCopy() *SchedulingObservation { + if in == nil { + return nil + } + out := new(SchedulingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingParameters) DeepCopyInto(out *SchedulingParameters) { + *out = *in + if in.MaxFailuresPerHour != nil { + in, out := &in.MaxFailuresPerHour, &out.MaxFailuresPerHour + *out = new(float64) + **out = **in + } + if in.MaxFailuresTotal != nil { + in, out := &in.MaxFailuresTotal, &out.MaxFailuresTotal + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingParameters. +func (in *SchedulingParameters) DeepCopy() *SchedulingParameters { + if in == nil { + return nil + } + out := new(SchedulingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryWorkerConfigAcceleratorsObservation) DeepCopyInto(out *SecondaryWorkerConfigAcceleratorsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryWorkerConfigAcceleratorsObservation. +func (in *SecondaryWorkerConfigAcceleratorsObservation) DeepCopy() *SecondaryWorkerConfigAcceleratorsObservation { + if in == nil { + return nil + } + out := new(SecondaryWorkerConfigAcceleratorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecondaryWorkerConfigAcceleratorsParameters) DeepCopyInto(out *SecondaryWorkerConfigAcceleratorsParameters) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(float64) + **out = **in + } + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryWorkerConfigAcceleratorsParameters. +func (in *SecondaryWorkerConfigAcceleratorsParameters) DeepCopy() *SecondaryWorkerConfigAcceleratorsParameters { + if in == nil { + return nil + } + out := new(SecondaryWorkerConfigAcceleratorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryWorkerConfigDiskConfigObservation) DeepCopyInto(out *SecondaryWorkerConfigDiskConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryWorkerConfigDiskConfigObservation. +func (in *SecondaryWorkerConfigDiskConfigObservation) DeepCopy() *SecondaryWorkerConfigDiskConfigObservation { + if in == nil { + return nil + } + out := new(SecondaryWorkerConfigDiskConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryWorkerConfigDiskConfigParameters) DeepCopyInto(out *SecondaryWorkerConfigDiskConfigParameters) { + *out = *in + if in.BootDiskSizeGb != nil { + in, out := &in.BootDiskSizeGb, &out.BootDiskSizeGb + *out = new(float64) + **out = **in + } + if in.BootDiskType != nil { + in, out := &in.BootDiskType, &out.BootDiskType + *out = new(string) + **out = **in + } + if in.NumLocalSsds != nil { + in, out := &in.NumLocalSsds, &out.NumLocalSsds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryWorkerConfigDiskConfigParameters. +func (in *SecondaryWorkerConfigDiskConfigParameters) DeepCopy() *SecondaryWorkerConfigDiskConfigParameters { + if in == nil { + return nil + } + out := new(SecondaryWorkerConfigDiskConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryWorkerConfigManagedGroupConfigObservation) DeepCopyInto(out *SecondaryWorkerConfigManagedGroupConfigObservation) { + *out = *in + if in.InstanceGroupManagerName != nil { + in, out := &in.InstanceGroupManagerName, &out.InstanceGroupManagerName + *out = new(string) + **out = **in + } + if in.InstanceTemplateName != nil { + in, out := &in.InstanceTemplateName, &out.InstanceTemplateName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryWorkerConfigManagedGroupConfigObservation. +func (in *SecondaryWorkerConfigManagedGroupConfigObservation) DeepCopy() *SecondaryWorkerConfigManagedGroupConfigObservation { + if in == nil { + return nil + } + out := new(SecondaryWorkerConfigManagedGroupConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecondaryWorkerConfigManagedGroupConfigParameters) DeepCopyInto(out *SecondaryWorkerConfigManagedGroupConfigParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryWorkerConfigManagedGroupConfigParameters. +func (in *SecondaryWorkerConfigManagedGroupConfigParameters) DeepCopy() *SecondaryWorkerConfigManagedGroupConfigParameters { + if in == nil { + return nil + } + out := new(SecondaryWorkerConfigManagedGroupConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryWorkerConfigObservation) DeepCopyInto(out *SecondaryWorkerConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryWorkerConfigObservation. +func (in *SecondaryWorkerConfigObservation) DeepCopy() *SecondaryWorkerConfigObservation { + if in == nil { + return nil + } + out := new(SecondaryWorkerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecondaryWorkerConfigParameters) DeepCopyInto(out *SecondaryWorkerConfigParameters) { + *out = *in + if in.MaxInstances != nil { + in, out := &in.MaxInstances, &out.MaxInstances + *out = new(float64) + **out = **in + } + if in.MinInstances != nil { + in, out := &in.MinInstances, &out.MinInstances + *out = new(float64) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecondaryWorkerConfigParameters. +func (in *SecondaryWorkerConfigParameters) DeepCopy() *SecondaryWorkerConfigParameters { + if in == nil { + return nil + } + out := new(SecondaryWorkerConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityConfigKerberosConfigObservation) DeepCopyInto(out *SecurityConfigKerberosConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigKerberosConfigObservation. +func (in *SecurityConfigKerberosConfigObservation) DeepCopy() *SecurityConfigKerberosConfigObservation { + if in == nil { + return nil + } + out := new(SecurityConfigKerberosConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityConfigKerberosConfigParameters) DeepCopyInto(out *SecurityConfigKerberosConfigParameters) { + *out = *in + if in.CrossRealmTrustAdminServer != nil { + in, out := &in.CrossRealmTrustAdminServer, &out.CrossRealmTrustAdminServer + *out = new(string) + **out = **in + } + if in.CrossRealmTrustKdc != nil { + in, out := &in.CrossRealmTrustKdc, &out.CrossRealmTrustKdc + *out = new(string) + **out = **in + } + if in.CrossRealmTrustRealm != nil { + in, out := &in.CrossRealmTrustRealm, &out.CrossRealmTrustRealm + *out = new(string) + **out = **in + } + if in.CrossRealmTrustSharedPassword != nil { + in, out := &in.CrossRealmTrustSharedPassword, &out.CrossRealmTrustSharedPassword + *out = new(string) + **out = **in + } + if in.EnableKerberos != nil { + in, out := &in.EnableKerberos, &out.EnableKerberos + *out = new(bool) + **out = **in + } + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(string) + **out = **in + } + if in.KdcDBKey != nil { + in, out := &in.KdcDBKey, &out.KdcDBKey + *out = new(string) + **out = **in + } + if in.KeyPassword != nil { + in, out := &in.KeyPassword, &out.KeyPassword + *out = new(string) + **out = **in + } + if in.Keystore != nil { + in, out := &in.Keystore, &out.Keystore + *out = new(string) + **out = **in + } + if in.KeystorePassword != nil { + in, out := &in.KeystorePassword, &out.KeystorePassword + *out = new(string) + **out = **in + } + if in.Realm != nil { + in, out := &in.Realm, &out.Realm + *out = new(string) + **out = **in + } + if in.RootPrincipalPassword != nil { + in, out := &in.RootPrincipalPassword, &out.RootPrincipalPassword + *out = new(string) + **out = **in + } + if in.TgtLifetimeHours != nil { + in, out := &in.TgtLifetimeHours, &out.TgtLifetimeHours + *out = new(float64) + **out = **in + } + if in.Truststore != nil { + in, out := &in.Truststore, &out.Truststore + *out = new(string) + **out = **in + } + if in.TruststorePassword != nil { + in, out := &in.TruststorePassword, &out.TruststorePassword + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigKerberosConfigParameters. +func (in *SecurityConfigKerberosConfigParameters) DeepCopy() *SecurityConfigKerberosConfigParameters { + if in == nil { + return nil + } + out := new(SecurityConfigKerberosConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityConfigObservation) DeepCopyInto(out *SecurityConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigObservation. +func (in *SecurityConfigObservation) DeepCopy() *SecurityConfigObservation { + if in == nil { + return nil + } + out := new(SecurityConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityConfigParameters) DeepCopyInto(out *SecurityConfigParameters) { + *out = *in + if in.KerberosConfig != nil { + in, out := &in.KerberosConfig, &out.KerberosConfig + *out = make([]KerberosConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityConfigParameters. 
+func (in *SecurityConfigParameters) DeepCopy() *SecurityConfigParameters { + if in == nil { + return nil + } + out := new(SecurityConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShieldedInstanceConfigObservation) DeepCopyInto(out *ShieldedInstanceConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShieldedInstanceConfigObservation. +func (in *ShieldedInstanceConfigObservation) DeepCopy() *ShieldedInstanceConfigObservation { + if in == nil { + return nil + } + out := new(ShieldedInstanceConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShieldedInstanceConfigParameters) DeepCopyInto(out *ShieldedInstanceConfigParameters) { + *out = *in + if in.EnableIntegrityMonitoring != nil { + in, out := &in.EnableIntegrityMonitoring, &out.EnableIntegrityMonitoring + *out = new(bool) + **out = **in + } + if in.EnableSecureBoot != nil { + in, out := &in.EnableSecureBoot, &out.EnableSecureBoot + *out = new(bool) + **out = **in + } + if in.EnableVtpm != nil { + in, out := &in.EnableVtpm, &out.EnableVtpm + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShieldedInstanceConfigParameters. +func (in *ShieldedInstanceConfigParameters) DeepCopy() *ShieldedInstanceConfigParameters { + if in == nil { + return nil + } + out := new(ShieldedInstanceConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SoftwareConfigObservation) DeepCopyInto(out *SoftwareConfigObservation) { + *out = *in + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareConfigObservation. +func (in *SoftwareConfigObservation) DeepCopy() *SoftwareConfigObservation { + if in == nil { + return nil + } + out := new(SoftwareConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SoftwareConfigParameters) DeepCopyInto(out *SoftwareConfigParameters) { + *out = *in + if in.ImageVersion != nil { + in, out := &in.ImageVersion, &out.ImageVersion + *out = new(string) + **out = **in + } + if in.OptionalComponents != nil { + in, out := &in.OptionalComponents, &out.OptionalComponents + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.OverrideProperties != nil { + in, out := &in.OverrideProperties, &out.OverrideProperties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareConfigParameters. 
+func (in *SoftwareConfigParameters) DeepCopy() *SoftwareConfigParameters { + if in == nil { + return nil + } + out := new(SoftwareConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkConfigLoggingConfigObservation) DeepCopyInto(out *SparkConfigLoggingConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkConfigLoggingConfigObservation. +func (in *SparkConfigLoggingConfigObservation) DeepCopy() *SparkConfigLoggingConfigObservation { + if in == nil { + return nil + } + out := new(SparkConfigLoggingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkConfigLoggingConfigParameters) DeepCopyInto(out *SparkConfigLoggingConfigParameters) { + *out = *in + if in.DriverLogLevels != nil { + in, out := &in.DriverLogLevels, &out.DriverLogLevels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkConfigLoggingConfigParameters. +func (in *SparkConfigLoggingConfigParameters) DeepCopy() *SparkConfigLoggingConfigParameters { + if in == nil { + return nil + } + out := new(SparkConfigLoggingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkConfigObservation) DeepCopyInto(out *SparkConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkConfigObservation. +func (in *SparkConfigObservation) DeepCopy() *SparkConfigObservation { + if in == nil { + return nil + } + out := new(SparkConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkConfigParameters) DeepCopyInto(out *SparkConfigParameters) { + *out = *in + if in.ArchiveUris != nil { + in, out := &in.ArchiveUris, &out.ArchiveUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FileUris != nil { + in, out := &in.FileUris, &out.FileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JarFileUris != nil { + in, out := &in.JarFileUris, &out.JarFileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = make([]SparkConfigLoggingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MainClass != nil { + in, out := &in.MainClass, &out.MainClass + *out = new(string) + **out = **in + } + if in.MainJarFileURI != nil { + in, out := &in.MainJarFileURI, &out.MainJarFileURI + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkConfigParameters. +func (in *SparkConfigParameters) DeepCopy() *SparkConfigParameters { + if in == nil { + return nil + } + out := new(SparkConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkHistoryServerConfigObservation) DeepCopyInto(out *SparkHistoryServerConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkHistoryServerConfigObservation. +func (in *SparkHistoryServerConfigObservation) DeepCopy() *SparkHistoryServerConfigObservation { + if in == nil { + return nil + } + out := new(SparkHistoryServerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkHistoryServerConfigParameters) DeepCopyInto(out *SparkHistoryServerConfigParameters) { + *out = *in + if in.DataprocCluster != nil { + in, out := &in.DataprocCluster, &out.DataprocCluster + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkHistoryServerConfigParameters. +func (in *SparkHistoryServerConfigParameters) DeepCopy() *SparkHistoryServerConfigParameters { + if in == nil { + return nil + } + out := new(SparkHistoryServerConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkJobLoggingConfigObservation) DeepCopyInto(out *SparkJobLoggingConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkJobLoggingConfigObservation. +func (in *SparkJobLoggingConfigObservation) DeepCopy() *SparkJobLoggingConfigObservation { + if in == nil { + return nil + } + out := new(SparkJobLoggingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkJobLoggingConfigParameters) DeepCopyInto(out *SparkJobLoggingConfigParameters) { + *out = *in + if in.DriverLogLevels != nil { + in, out := &in.DriverLogLevels, &out.DriverLogLevels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkJobLoggingConfigParameters. +func (in *SparkJobLoggingConfigParameters) DeepCopy() *SparkJobLoggingConfigParameters { + if in == nil { + return nil + } + out := new(SparkJobLoggingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkJobObservation) DeepCopyInto(out *SparkJobObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkJobObservation. +func (in *SparkJobObservation) DeepCopy() *SparkJobObservation { + if in == nil { + return nil + } + out := new(SparkJobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkJobParameters) DeepCopyInto(out *SparkJobParameters) { + *out = *in + if in.ArchiveUris != nil { + in, out := &in.ArchiveUris, &out.ArchiveUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FileUris != nil { + in, out := &in.FileUris, &out.FileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.JarFileUris != nil { + in, out := &in.JarFileUris, &out.JarFileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = make([]SparkJobLoggingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MainClass != nil { + in, out := &in.MainClass, &out.MainClass + *out = new(string) + **out = **in + } + if in.MainJarFileURI != nil { + in, out := &in.MainJarFileURI, &out.MainJarFileURI + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkJobParameters. +func (in *SparkJobParameters) DeepCopy() *SparkJobParameters { + if in == nil { + return nil + } + out := new(SparkJobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkRJobLoggingConfigObservation) DeepCopyInto(out *SparkRJobLoggingConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkRJobLoggingConfigObservation. +func (in *SparkRJobLoggingConfigObservation) DeepCopy() *SparkRJobLoggingConfigObservation { + if in == nil { + return nil + } + out := new(SparkRJobLoggingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkRJobLoggingConfigParameters) DeepCopyInto(out *SparkRJobLoggingConfigParameters) { + *out = *in + if in.DriverLogLevels != nil { + in, out := &in.DriverLogLevels, &out.DriverLogLevels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkRJobLoggingConfigParameters. 
+func (in *SparkRJobLoggingConfigParameters) DeepCopy() *SparkRJobLoggingConfigParameters { + if in == nil { + return nil + } + out := new(SparkRJobLoggingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkRJobObservation) DeepCopyInto(out *SparkRJobObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkRJobObservation. +func (in *SparkRJobObservation) DeepCopy() *SparkRJobObservation { + if in == nil { + return nil + } + out := new(SparkRJobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkRJobParameters) DeepCopyInto(out *SparkRJobParameters) { + *out = *in + if in.ArchiveUris != nil { + in, out := &in.ArchiveUris, &out.ArchiveUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.FileUris != nil { + in, out := &in.FileUris, &out.FileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = make([]SparkRJobLoggingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MainRFileURI != nil { + in, out := &in.MainRFileURI, &out.MainRFileURI + *out = new(string) + **out = **in + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkRJobParameters. +func (in *SparkRJobParameters) DeepCopy() *SparkRJobParameters { + if in == nil { + return nil + } + out := new(SparkRJobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkSQLJobLoggingConfigObservation) DeepCopyInto(out *SparkSQLJobLoggingConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkSQLJobLoggingConfigObservation. +func (in *SparkSQLJobLoggingConfigObservation) DeepCopy() *SparkSQLJobLoggingConfigObservation { + if in == nil { + return nil + } + out := new(SparkSQLJobLoggingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkSQLJobLoggingConfigParameters) DeepCopyInto(out *SparkSQLJobLoggingConfigParameters) { + *out = *in + if in.DriverLogLevels != nil { + in, out := &in.DriverLogLevels, &out.DriverLogLevels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkSQLJobLoggingConfigParameters. +func (in *SparkSQLJobLoggingConfigParameters) DeepCopy() *SparkSQLJobLoggingConfigParameters { + if in == nil { + return nil + } + out := new(SparkSQLJobLoggingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkSQLJobObservation) DeepCopyInto(out *SparkSQLJobObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkSQLJobObservation. +func (in *SparkSQLJobObservation) DeepCopy() *SparkSQLJobObservation { + if in == nil { + return nil + } + out := new(SparkSQLJobObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkSQLJobParameters) DeepCopyInto(out *SparkSQLJobParameters) { + *out = *in + if in.JarFileUris != nil { + in, out := &in.JarFileUris, &out.JarFileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = make([]SparkSQLJobLoggingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.QueryFileURI != nil { + in, out := &in.QueryFileURI, &out.QueryFileURI + *out = new(string) + **out = **in + } + if in.QueryList != nil { + in, out := &in.QueryList, &out.QueryList + *out = make([]SparkSQLJobQueryListParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ScriptVariables != nil { + in, out := &in.ScriptVariables, &out.ScriptVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkSQLJobParameters. +func (in *SparkSQLJobParameters) DeepCopy() *SparkSQLJobParameters { + if in == nil { + return nil + } + out := new(SparkSQLJobParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparkSQLJobQueryListObservation) DeepCopyInto(out *SparkSQLJobQueryListObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkSQLJobQueryListObservation. +func (in *SparkSQLJobQueryListObservation) DeepCopy() *SparkSQLJobQueryListObservation { + if in == nil { + return nil + } + out := new(SparkSQLJobQueryListObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparkSQLJobQueryListParameters) DeepCopyInto(out *SparkSQLJobQueryListParameters) { + *out = *in + if in.Queries != nil { + in, out := &in.Queries, &out.Queries + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkSQLJobQueryListParameters. +func (in *SparkSQLJobQueryListParameters) DeepCopy() *SparkSQLJobQueryListParameters { + if in == nil { + return nil + } + out := new(SparkSQLJobQueryListParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparksqlConfigLoggingConfigObservation) DeepCopyInto(out *SparksqlConfigLoggingConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparksqlConfigLoggingConfigObservation. +func (in *SparksqlConfigLoggingConfigObservation) DeepCopy() *SparksqlConfigLoggingConfigObservation { + if in == nil { + return nil + } + out := new(SparksqlConfigLoggingConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparksqlConfigLoggingConfigParameters) DeepCopyInto(out *SparksqlConfigLoggingConfigParameters) { + *out = *in + if in.DriverLogLevels != nil { + in, out := &in.DriverLogLevels, &out.DriverLogLevels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparksqlConfigLoggingConfigParameters. +func (in *SparksqlConfigLoggingConfigParameters) DeepCopy() *SparksqlConfigLoggingConfigParameters { + if in == nil { + return nil + } + out := new(SparksqlConfigLoggingConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SparksqlConfigObservation) DeepCopyInto(out *SparksqlConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparksqlConfigObservation. +func (in *SparksqlConfigObservation) DeepCopy() *SparksqlConfigObservation { + if in == nil { + return nil + } + out := new(SparksqlConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SparksqlConfigParameters) DeepCopyInto(out *SparksqlConfigParameters) { + *out = *in + if in.JarFileUris != nil { + in, out := &in.JarFileUris, &out.JarFileUris + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.LoggingConfig != nil { + in, out := &in.LoggingConfig, &out.LoggingConfig + *out = make([]SparksqlConfigLoggingConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.QueryFileURI != nil { + in, out := &in.QueryFileURI, &out.QueryFileURI + *out = new(string) + **out = **in + } + if in.QueryList != nil { + in, out := &in.QueryList, &out.QueryList + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.ScriptVariables != nil { + in, out := &in.ScriptVariables, &out.ScriptVariables + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparksqlConfigParameters. +func (in *SparksqlConfigParameters) DeepCopy() *SparksqlConfigParameters { + if in == nil { + return nil + } + out := new(SparksqlConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatusObservation) DeepCopyInto(out *StatusObservation) { + *out = *in + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = new(string) + **out = **in + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.StateStartTime != nil { + in, out := &in.StateStartTime, &out.StateStartTime + *out = new(string) + **out = **in + } + if in.Substate != nil { + in, out := &in.Substate, &out.Substate + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusObservation. +func (in *StatusObservation) DeepCopy() *StatusObservation { + if in == nil { + return nil + } + out := new(StatusObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatusParameters) DeepCopyInto(out *StatusParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusParameters. +func (in *StatusParameters) DeepCopy() *StatusParameters { + if in == nil { + return nil + } + out := new(StatusParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidationObservation) DeepCopyInto(out *ValidationObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationObservation. +func (in *ValidationObservation) DeepCopy() *ValidationObservation { + if in == nil { + return nil + } + out := new(ValidationObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationParameters) DeepCopyInto(out *ValidationParameters) { + *out = *in + if in.Regex != nil { + in, out := &in.Regex, &out.Regex + *out = make([]RegexParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]ValuesParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationParameters. +func (in *ValidationParameters) DeepCopy() *ValidationParameters { + if in == nil { + return nil + } + out := new(ValidationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValuesObservation) DeepCopyInto(out *ValuesObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValuesObservation. +func (in *ValuesObservation) DeepCopy() *ValuesObservation { + if in == nil { + return nil + } + out := new(ValuesObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValuesParameters) DeepCopyInto(out *ValuesParameters) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValuesParameters. +func (in *ValuesParameters) DeepCopy() *ValuesParameters { + if in == nil { + return nil + } + out := new(ValuesParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualClusterConfigObservation) DeepCopyInto(out *VirtualClusterConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterConfigObservation. +func (in *VirtualClusterConfigObservation) DeepCopy() *VirtualClusterConfigObservation { + if in == nil { + return nil + } + out := new(VirtualClusterConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualClusterConfigParameters) DeepCopyInto(out *VirtualClusterConfigParameters) { + *out = *in + if in.AuxiliaryServicesConfig != nil { + in, out := &in.AuxiliaryServicesConfig, &out.AuxiliaryServicesConfig + *out = make([]AuxiliaryServicesConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KubernetesClusterConfig != nil { + in, out := &in.KubernetesClusterConfig, &out.KubernetesClusterConfig + *out = make([]KubernetesClusterConfigParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StagingBucket != nil { + in, out := &in.StagingBucket, &out.StagingBucket + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualClusterConfigParameters. +func (in *VirtualClusterConfigParameters) DeepCopy() *VirtualClusterConfigParameters { + if in == nil { + return nil + } + out := new(VirtualClusterConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerConfigAcceleratorsObservation) DeepCopyInto(out *WorkerConfigAcceleratorsObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerConfigAcceleratorsObservation. +func (in *WorkerConfigAcceleratorsObservation) DeepCopy() *WorkerConfigAcceleratorsObservation { + if in == nil { + return nil + } + out := new(WorkerConfigAcceleratorsObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerConfigAcceleratorsParameters) DeepCopyInto(out *WorkerConfigAcceleratorsParameters) { + *out = *in + if in.AcceleratorCount != nil { + in, out := &in.AcceleratorCount, &out.AcceleratorCount + *out = new(float64) + **out = **in + } + if in.AcceleratorType != nil { + in, out := &in.AcceleratorType, &out.AcceleratorType + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerConfigAcceleratorsParameters. +func (in *WorkerConfigAcceleratorsParameters) DeepCopy() *WorkerConfigAcceleratorsParameters { + if in == nil { + return nil + } + out := new(WorkerConfigAcceleratorsParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerConfigDiskConfigObservation) DeepCopyInto(out *WorkerConfigDiskConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerConfigDiskConfigObservation. +func (in *WorkerConfigDiskConfigObservation) DeepCopy() *WorkerConfigDiskConfigObservation { + if in == nil { + return nil + } + out := new(WorkerConfigDiskConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkerConfigDiskConfigParameters) DeepCopyInto(out *WorkerConfigDiskConfigParameters) { + *out = *in + if in.BootDiskSizeGb != nil { + in, out := &in.BootDiskSizeGb, &out.BootDiskSizeGb + *out = new(float64) + **out = **in + } + if in.BootDiskType != nil { + in, out := &in.BootDiskType, &out.BootDiskType + *out = new(string) + **out = **in + } + if in.NumLocalSsds != nil { + in, out := &in.NumLocalSsds, &out.NumLocalSsds + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerConfigDiskConfigParameters. +func (in *WorkerConfigDiskConfigParameters) DeepCopy() *WorkerConfigDiskConfigParameters { + if in == nil { + return nil + } + out := new(WorkerConfigDiskConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerConfigManagedGroupConfigObservation) DeepCopyInto(out *WorkerConfigManagedGroupConfigObservation) { + *out = *in + if in.InstanceGroupManagerName != nil { + in, out := &in.InstanceGroupManagerName, &out.InstanceGroupManagerName + *out = new(string) + **out = **in + } + if in.InstanceTemplateName != nil { + in, out := &in.InstanceTemplateName, &out.InstanceTemplateName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerConfigManagedGroupConfigObservation. +func (in *WorkerConfigManagedGroupConfigObservation) DeepCopy() *WorkerConfigManagedGroupConfigObservation { + if in == nil { + return nil + } + out := new(WorkerConfigManagedGroupConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerConfigManagedGroupConfigParameters) DeepCopyInto(out *WorkerConfigManagedGroupConfigParameters) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerConfigManagedGroupConfigParameters. +func (in *WorkerConfigManagedGroupConfigParameters) DeepCopy() *WorkerConfigManagedGroupConfigParameters { + if in == nil { + return nil + } + out := new(WorkerConfigManagedGroupConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerConfigObservation) DeepCopyInto(out *WorkerConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerConfigObservation. +func (in *WorkerConfigObservation) DeepCopy() *WorkerConfigObservation { + if in == nil { + return nil + } + out := new(WorkerConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkerConfigParameters) DeepCopyInto(out *WorkerConfigParameters) { + *out = *in + if in.MaxInstances != nil { + in, out := &in.MaxInstances, &out.MaxInstances + *out = new(float64) + **out = **in + } + if in.MinInstances != nil { + in, out := &in.MinInstances, &out.MinInstances + *out = new(float64) + **out = **in + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerConfigParameters. 
+func (in *WorkerConfigParameters) DeepCopy() *WorkerConfigParameters { + if in == nil { + return nil + } + out := new(WorkerConfigParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowTemplate) DeepCopyInto(out *WorkflowTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTemplate. +func (in *WorkflowTemplate) DeepCopy() *WorkflowTemplate { + if in == nil { + return nil + } + out := new(WorkflowTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkflowTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowTemplateList) DeepCopyInto(out *WorkflowTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WorkflowTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTemplateList. +func (in *WorkflowTemplateList) DeepCopy() *WorkflowTemplateList { + if in == nil { + return nil + } + out := new(WorkflowTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkflowTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowTemplateObservation) DeepCopyInto(out *WorkflowTemplateObservation) { + *out = *in + if in.CreateTime != nil { + in, out := &in.CreateTime, &out.CreateTime + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = make([]WorkflowTemplatePlacementObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.UpdateTime != nil { + in, out := &in.UpdateTime, &out.UpdateTime + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTemplateObservation. +func (in *WorkflowTemplateObservation) DeepCopy() *WorkflowTemplateObservation { + if in == nil { + return nil + } + out := new(WorkflowTemplateObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkflowTemplateParameters) DeepCopyInto(out *WorkflowTemplateParameters) { + *out = *in + if in.DagTimeout != nil { + in, out := &in.DagTimeout, &out.DagTimeout + *out = new(string) + **out = **in + } + if in.Jobs != nil { + in, out := &in.Jobs, &out.Jobs + *out = make([]JobsParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]*string, len(*in)) + for key, val := range *in { + var outVal *string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(string) + **out = **in + } + (*out)[key] = outVal + } + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]ParametersParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = make([]WorkflowTemplatePlacementParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTemplateParameters. +func (in *WorkflowTemplateParameters) DeepCopy() *WorkflowTemplateParameters { + if in == nil { + return nil + } + out := new(WorkflowTemplateParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowTemplatePlacementObservation) DeepCopyInto(out *WorkflowTemplatePlacementObservation) { + *out = *in + if in.ManagedCluster != nil { + in, out := &in.ManagedCluster, &out.ManagedCluster + *out = make([]ManagedClusterObservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTemplatePlacementObservation. +func (in *WorkflowTemplatePlacementObservation) DeepCopy() *WorkflowTemplatePlacementObservation { + if in == nil { + return nil + } + out := new(WorkflowTemplatePlacementObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowTemplatePlacementParameters) DeepCopyInto(out *WorkflowTemplatePlacementParameters) { + *out = *in + if in.ClusterSelector != nil { + in, out := &in.ClusterSelector, &out.ClusterSelector + *out = make([]ClusterSelectorParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ManagedCluster != nil { + in, out := &in.ManagedCluster, &out.ManagedCluster + *out = make([]ManagedClusterParameters, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTemplatePlacementParameters. 
+func (in *WorkflowTemplatePlacementParameters) DeepCopy() *WorkflowTemplatePlacementParameters { + if in == nil { + return nil + } + out := new(WorkflowTemplatePlacementParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowTemplateSpec) DeepCopyInto(out *WorkflowTemplateSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTemplateSpec. +func (in *WorkflowTemplateSpec) DeepCopy() *WorkflowTemplateSpec { + if in == nil { + return nil + } + out := new(WorkflowTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowTemplateStatus) DeepCopyInto(out *WorkflowTemplateStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowTemplateStatus. +func (in *WorkflowTemplateStatus) DeepCopy() *WorkflowTemplateStatus { + if in == nil { + return nil + } + out := new(WorkflowTemplateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YarnConfigObservation) DeepCopyInto(out *YarnConfigObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YarnConfigObservation. +func (in *YarnConfigObservation) DeepCopy() *YarnConfigObservation { + if in == nil { + return nil + } + out := new(YarnConfigObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YarnConfigParameters) DeepCopyInto(out *YarnConfigParameters) { + *out = *in + if in.GracefulDecommissionTimeout != nil { + in, out := &in.GracefulDecommissionTimeout, &out.GracefulDecommissionTimeout + *out = new(string) + **out = **in + } + if in.ScaleDownFactor != nil { + in, out := &in.ScaleDownFactor, &out.ScaleDownFactor + *out = new(float64) + **out = **in + } + if in.ScaleDownMinWorkerFraction != nil { + in, out := &in.ScaleDownMinWorkerFraction, &out.ScaleDownMinWorkerFraction + *out = new(float64) + **out = **in + } + if in.ScaleUpFactor != nil { + in, out := &in.ScaleUpFactor, &out.ScaleUpFactor + *out = new(float64) + **out = **in + } + if in.ScaleUpMinWorkerFraction != nil { + in, out := &in.ScaleUpMinWorkerFraction, &out.ScaleUpMinWorkerFraction + *out = new(float64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YarnConfigParameters. +func (in *YarnConfigParameters) DeepCopy() *YarnConfigParameters { + if in == nil { + return nil + } + out := new(YarnConfigParameters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/dataproc/v1beta1/zz_generated.managed.go b/apis/dataproc/v1beta1/zz_generated.managed.go new file mode 100644 index 000000000..81b7ed4d0 --- /dev/null +++ b/apis/dataproc/v1beta1/zz_generated.managed.go @@ -0,0 +1,284 @@ +/* +Copyright 2021 The Crossplane Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1beta1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this AutoscalingPolicy. +func (mg *AutoscalingPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this AutoscalingPolicy. +func (mg *AutoscalingPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetProviderConfigReference of this AutoscalingPolicy. +func (mg *AutoscalingPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this AutoscalingPolicy. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *AutoscalingPolicy) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this AutoscalingPolicy. +func (mg *AutoscalingPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this AutoscalingPolicy. +func (mg *AutoscalingPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this AutoscalingPolicy. +func (mg *AutoscalingPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this AutoscalingPolicy. +func (mg *AutoscalingPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetProviderConfigReference of this AutoscalingPolicy. +func (mg *AutoscalingPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this AutoscalingPolicy. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *AutoscalingPolicy) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this AutoscalingPolicy. +func (mg *AutoscalingPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this AutoscalingPolicy. +func (mg *AutoscalingPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Cluster. +func (mg *Cluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Cluster. +func (mg *Cluster) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetProviderConfigReference of this Cluster. +func (mg *Cluster) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this Cluster. +Deprecated: Use GetProviderConfigReference. 
+*/ +func (mg *Cluster) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Cluster. +func (mg *Cluster) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Cluster. +func (mg *Cluster) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetProviderConfigReference of this Cluster. +func (mg *Cluster) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this Cluster. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *Cluster) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this Cluster. +func (mg *Cluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Cluster. +func (mg *Cluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this Job. +func (mg *Job) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Job. +func (mg *Job) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetProviderConfigReference of this Job. +func (mg *Job) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this Job. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *Job) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this Job. +func (mg *Job) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Job. +func (mg *Job) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Job. +func (mg *Job) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Job. +func (mg *Job) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetProviderConfigReference of this Job. +func (mg *Job) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this Job. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *Job) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this Job. +func (mg *Job) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Job. +func (mg *Job) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this WorkflowTemplate. 
+func (mg *WorkflowTemplate) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this WorkflowTemplate. +func (mg *WorkflowTemplate) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetProviderConfigReference of this WorkflowTemplate. +func (mg *WorkflowTemplate) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this WorkflowTemplate. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *WorkflowTemplate) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this WorkflowTemplate. +func (mg *WorkflowTemplate) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this WorkflowTemplate. +func (mg *WorkflowTemplate) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this WorkflowTemplate. +func (mg *WorkflowTemplate) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this WorkflowTemplate. +func (mg *WorkflowTemplate) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetProviderConfigReference of this WorkflowTemplate. +func (mg *WorkflowTemplate) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this WorkflowTemplate. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *WorkflowTemplate) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this WorkflowTemplate. +func (mg *WorkflowTemplate) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this WorkflowTemplate. +func (mg *WorkflowTemplate) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/apis/dataproc/v1beta1/zz_generated.managedlist.go b/apis/dataproc/v1beta1/zz_generated.managedlist.go new file mode 100644 index 000000000..133a5dbe3 --- /dev/null +++ b/apis/dataproc/v1beta1/zz_generated.managedlist.go @@ -0,0 +1,56 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1beta1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AutoscalingPolicyList. +func (l *AutoscalingPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this ClusterList. 
+func (l *ClusterList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this JobList. +func (l *JobList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this WorkflowTemplateList. +func (l *WorkflowTemplateList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/apis/dataproc/v1beta1/zz_generated.resolvers.go b/apis/dataproc/v1beta1/zz_generated.resolvers.go new file mode 100644 index 000000000..d65f8c5d1 --- /dev/null +++ b/apis/dataproc/v1beta1/zz_generated.resolvers.go @@ -0,0 +1,102 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + reference "github.com/crossplane/crossplane-runtime/pkg/reference" + errors "github.com/pkg/errors" + v1beta1 "github.com/upbound/provider-gcp/apis/cloudplatform/v1beta1" + resource "github.com/upbound/upjet/pkg/resource" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ResolveReferences of this Cluster. +func (mg *Cluster) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.ClusterConfig); i3++ { + for i4 := 0; i4 < len(mg.Spec.ForProvider.ClusterConfig[i3].GceClusterConfig); i4++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.ClusterConfig[i3].GceClusterConfig[i4].ServiceAccount), + Extract: resource.ExtractParamPath("email", true), + Reference: mg.Spec.ForProvider.ClusterConfig[i3].GceClusterConfig[i4].ServiceAccountRef, + Selector: mg.Spec.ForProvider.ClusterConfig[i3].GceClusterConfig[i4].ServiceAccountSelector, + To: reference.To{ + List: &v1beta1.ServiceAccountList{}, + Managed: &v1beta1.ServiceAccount{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.ClusterConfig[i3].GceClusterConfig[i4].ServiceAccount") + } + mg.Spec.ForProvider.ClusterConfig[i3].GceClusterConfig[i4].ServiceAccount = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ClusterConfig[i3].GceClusterConfig[i4].ServiceAccountRef = rsp.ResolvedReference + + } + } + + return nil +} + +// ResolveReferences of this Job. 
+func (mg *Job) ResolveReferences(ctx context.Context, c client.Reader) error { + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + + for i3 := 0; i3 < len(mg.Spec.ForProvider.Placement); i3++ { + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Placement[i3].ClusterName), + Extract: resource.ExtractParamPath("name", false), + Reference: mg.Spec.ForProvider.Placement[i3].ClusterNameRef, + Selector: mg.Spec.ForProvider.Placement[i3].ClusterNameSelector, + To: reference.To{ + List: &ClusterList{}, + Managed: &Cluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Placement[i3].ClusterName") + } + mg.Spec.ForProvider.Placement[i3].ClusterName = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.Placement[i3].ClusterNameRef = rsp.ResolvedReference + + } + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Region), + Extract: resource.ExtractParamPath("region", false), + Reference: mg.Spec.ForProvider.RegionRef, + Selector: mg.Spec.ForProvider.RegionSelector, + To: reference.To{ + List: &ClusterList{}, + Managed: &Cluster{}, + }, + }) + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Region") + } + mg.Spec.ForProvider.Region = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.RegionRef = rsp.ResolvedReference + + return nil +} diff --git a/apis/dataproc/v1beta1/zz_generated_terraformed.go b/apis/dataproc/v1beta1/zz_generated_terraformed.go new file mode 100755 index 000000000..3cc47499d --- /dev/null +++ b/apis/dataproc/v1beta1/zz_generated_terraformed.go @@ -0,0 +1,322 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "github.com/pkg/errors" + + "github.com/upbound/upjet/pkg/resource" + "github.com/upbound/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this AutoscalingPolicy +func (mg *AutoscalingPolicy) GetTerraformResourceType() string { + return "google_dataproc_autoscaling_policy" +} + +// GetConnectionDetailsMapping for this AutoscalingPolicy +func (tr *AutoscalingPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this AutoscalingPolicy +func (tr *AutoscalingPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this AutoscalingPolicy +func (tr *AutoscalingPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this AutoscalingPolicy +func (tr *AutoscalingPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this AutoscalingPolicy +func (tr *AutoscalingPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this AutoscalingPolicy +func (tr *AutoscalingPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// LateInitialize this AutoscalingPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *AutoscalingPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &AutoscalingPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *AutoscalingPolicy) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this Cluster +func (mg *Cluster) GetTerraformResourceType() string { + return "google_dataproc_cluster" +} + +// GetConnectionDetailsMapping for this Cluster +func (tr *Cluster) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Cluster +func (tr *Cluster) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Cluster +func (tr *Cluster) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Cluster +func (tr *Cluster) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Cluster +func (tr *Cluster) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Cluster +func (tr *Cluster) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// LateInitialize this Cluster using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Cluster) LateInitialize(attrs []byte) (bool, error) { + params := &ClusterParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Cluster) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this Job +func (mg *Job) GetTerraformResourceType() string { + return "google_dataproc_job" +} + +// GetConnectionDetailsMapping for this Job +func (tr *Job) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this Job +func (tr *Job) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this Job +func (tr *Job) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this Job +func (tr *Job) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this Job +func (tr *Job) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this Job +func (tr *Job) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// LateInitialize this Job using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *Job) LateInitialize(attrs []byte) (bool, error) { + params := &JobParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) 
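+ // The returned boolean reports whether any spec field was filled in from state, which signals the managed-resource reconciler to persist the updated spec.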
+ return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *Job) GetTerraformSchemaVersion() int { + return 0 +} + +// GetTerraformResourceType returns Terraform resource type for this WorkflowTemplate +func (mg *WorkflowTemplate) GetTerraformResourceType() string { + return "google_dataproc_workflow_template" +} + +// GetConnectionDetailsMapping for this WorkflowTemplate +func (tr *WorkflowTemplate) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this WorkflowTemplate +func (tr *WorkflowTemplate) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this WorkflowTemplate +func (tr *WorkflowTemplate) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this WorkflowTemplate +func (tr *WorkflowTemplate) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this WorkflowTemplate +func (tr *WorkflowTemplate) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this WorkflowTemplate +func (tr *WorkflowTemplate) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// LateInitialize this WorkflowTemplate using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *WorkflowTemplate) LateInitialize(attrs []byte) (bool, error) { + params := &WorkflowTemplateParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *WorkflowTemplate) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/dataproc/v1beta1/zz_groupversion_info.go b/apis/dataproc/v1beta1/zz_groupversion_info.go new file mode 100755 index 000000000..af64fb779 --- /dev/null +++ b/apis/dataproc/v1beta1/zz_groupversion_info.go @@ -0,0 +1,44 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. 
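For orientation, a minimal usage sketch (editorial, not part of the generated patch) of how the Terraformed methods above are exercised; it assumes the provider module path github.com/upbound/provider-gcp and the standard upjet behaviour of the tf struct tags:

package main

import (
	"fmt"

	"github.com/upbound/provider-gcp/apis/dataproc/v1beta1"
)

func main() {
	location := "global"
	policy := &v1beta1.AutoscalingPolicy{}
	policy.Spec.ForProvider.Location = &location

	// GetParameters flattens spec.forProvider into the map handed to the
	// Terraform workspace, keyed by the tf struct tags (e.g. "location").
	params, err := policy.GetParameters()
	if err != nil {
		panic(err)
	}
	fmt.Println(params["location"]) // global

	// SetObservation writes a Terraform state map back into status.atProvider,
	// after which GetID exposes the external identifier.
	if err := policy.SetObservation(map[string]any{
		"id": "projects/demo/locations/global/autoscalingPolicies/example", // hypothetical ID
	}); err != nil {
		panic(err)
	}
	fmt.Println(policy.GetID())
}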
+ +// +kubebuilder:object:generate=true +// +groupName=dataproc.gcp.upbound.io +// +versionName=v1beta1 +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + CRDGroup = "dataproc.gcp.upbound.io" + CRDVersion = "v1beta1" +) + +var ( + // CRDGroupVersion is the API Group Version used to register the objects + CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/dataproc/v1beta1/zz_job_types.go b/apis/dataproc/v1beta1/zz_job_types.go new file mode 100755 index 000000000..42fdc046e --- /dev/null +++ b/apis/dataproc/v1beta1/zz_job_types.go @@ -0,0 +1,525 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type HadoopConfigObservation struct { +} + +type HadoopConfigParameters struct { + + // HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip. + // +kubebuilder:validation:Optional + ArchiveUris []*string `json:"archiveUris,omitempty" tf:"archive_uris,omitempty"` + + // The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + // +kubebuilder:validation:Optional + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks. + // +kubebuilder:validation:Optional + FileUris []*string `json:"fileUris,omitempty" tf:"file_uris,omitempty"` + + // HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks. + // +kubebuilder:validation:Optional + JarFileUris []*string `json:"jarFileUris,omitempty" tf:"jar_file_uris,omitempty"` + + // +kubebuilder:validation:Optional + LoggingConfig []LoggingConfigParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris. Conflicts with main_jar_file_uri + // +kubebuilder:validation:Optional + MainClass *string `json:"mainClass,omitempty" tf:"main_class,omitempty"` + + // The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'. 
Conflicts with main_class + // +kubebuilder:validation:Optional + MainJarFileURI *string `json:"mainJarFileUri,omitempty" tf:"main_jar_file_uri,omitempty"` + + // A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` +} + +type HiveConfigObservation struct { +} + +type HiveConfigParameters struct { + + // Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. Defaults to false. + // +kubebuilder:validation:Optional + ContinueOnFailure *bool `json:"continueOnFailure,omitempty" tf:"continue_on_failure,omitempty"` + + // HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. + // +kubebuilder:validation:Optional + JarFileUris []*string `json:"jarFileUris,omitempty" tf:"jar_file_uris,omitempty"` + + // A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // HCFS URI of file containing Hive script to execute as the job. + // Conflicts with query_list + // +kubebuilder:validation:Optional + QueryFileURI *string `json:"queryFileUri,omitempty" tf:"query_file_uri,omitempty"` + + // The list of Hive queries or statements to execute as part of the job. + // Conflicts with query_file_uri + // +kubebuilder:validation:Optional + QueryList []*string `json:"queryList,omitempty" tf:"query_list,omitempty"` + + // Mapping of query variable names to values (equivalent to the Hive command: SET name="value";). + // +kubebuilder:validation:Optional + ScriptVariables map[string]*string `json:"scriptVariables,omitempty" tf:"script_variables,omitempty"` +} + +type JobObservation struct { + + // If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri. + DriverControlsFilesURI *string `json:"driverControlsFilesUri,omitempty" tf:"driver_controls_files_uri,omitempty"` + + // A URI pointing to the location of the stdout of the job's driver program. + DriverOutputResourceURI *string `json:"driverOutputResourceUri,omitempty" tf:"driver_output_resource_uri,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // +kubebuilder:validation:Required + Placement []PlacementObservation `json:"placement,omitempty" tf:"placement,omitempty"` + + Status []StatusObservation `json:"status,omitempty" tf:"status,omitempty"` +} + +type JobParameters struct { + + // By default, you can only delete inactive jobs within + // Dataproc. Setting this to true, and calling destroy, will ensure that the + // job is first cancelled before issuing the delete. 
+ // +kubebuilder:validation:Optional + ForceDelete *bool `json:"forceDelete,omitempty" tf:"force_delete,omitempty"` + + // +kubebuilder:validation:Optional + HadoopConfig []HadoopConfigParameters `json:"hadoopConfig,omitempty" tf:"hadoop_config,omitempty"` + + // +kubebuilder:validation:Optional + HiveConfig []HiveConfigParameters `json:"hiveConfig,omitempty" tf:"hive_config,omitempty"` + + // The list of labels (key/value pairs) to add to the job. + // +kubebuilder:validation:Optional + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // +kubebuilder:validation:Optional + PigConfig []PigConfigParameters `json:"pigConfig,omitempty" tf:"pig_config,omitempty"` + + // +kubebuilder:validation:Required + Placement []PlacementParameters `json:"placement" tf:"placement,omitempty"` + + // +kubebuilder:validation:Optional + PrestoConfig []PrestoConfigParameters `json:"prestoConfig,omitempty" tf:"presto_config,omitempty"` + + // The project in which the cluster can be found and jobs + // subsequently run against. If it is not provided, the provider project is used. + // +kubebuilder:validation:Optional + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // +kubebuilder:validation:Optional + PysparkConfig []PysparkConfigParameters `json:"pysparkConfig,omitempty" tf:"pyspark_config,omitempty"` + + // +kubebuilder:validation:Optional + Reference []ReferenceParameters `json:"reference,omitempty" tf:"reference,omitempty"` + + // The Cloud Dataproc region. This essentially determines which clusters are available + // for this job to be submitted to. If not specified, defaults to global. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/dataproc/v1beta1.Cluster + // +crossplane:generate:reference:extractor=github.com/upbound/upjet/pkg/resource.ExtractParamPath("region",false) + // +kubebuilder:validation:Optional + Region *string `json:"region,omitempty" tf:"region,omitempty"` + + // Reference to a Cluster in dataproc to populate region. + // +kubebuilder:validation:Optional + RegionRef *v1.Reference `json:"regionRef,omitempty" tf:"-"` + + // Selector for a Cluster in dataproc to populate region. + // +kubebuilder:validation:Optional + RegionSelector *v1.Selector `json:"regionSelector,omitempty" tf:"-"` + + // +kubebuilder:validation:Optional + Scheduling []SchedulingParameters `json:"scheduling,omitempty" tf:"scheduling,omitempty"` + + // +kubebuilder:validation:Optional + SparkConfig []SparkConfigParameters `json:"sparkConfig,omitempty" tf:"spark_config,omitempty"` + + // +kubebuilder:validation:Optional + SparksqlConfig []SparksqlConfigParameters `json:"sparksqlConfig,omitempty" tf:"sparksql_config,omitempty"` +} + +type LoggingConfigObservation struct { +} + +type LoggingConfigParameters struct { + + // The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + // +kubebuilder:validation:Required + DriverLogLevels map[string]*string `json:"driverLogLevels" tf:"driver_log_levels,omitempty"` +} + +type PigConfigLoggingConfigObservation struct { +} + +type PigConfigLoggingConfigParameters struct { + + // The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. 
Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + // +kubebuilder:validation:Required + DriverLogLevels map[string]*string `json:"driverLogLevels" tf:"driver_log_levels,omitempty"` +} + +type PigConfigObservation struct { +} + +type PigConfigParameters struct { + + // Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. Defaults to false. + // +kubebuilder:validation:Optional + ContinueOnFailure *bool `json:"continueOnFailure,omitempty" tf:"continue_on_failure,omitempty"` + + // HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. + // +kubebuilder:validation:Optional + JarFileUris []*string `json:"jarFileUris,omitempty" tf:"jar_file_uris,omitempty"` + + // +kubebuilder:validation:Optional + LoggingConfig []PigConfigLoggingConfigParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // HCFS URI of file containing Hive script to execute as the job. + // Conflicts with query_list + // +kubebuilder:validation:Optional + QueryFileURI *string `json:"queryFileUri,omitempty" tf:"query_file_uri,omitempty"` + + // The list of Hive queries or statements to execute as part of the job. + // Conflicts with query_file_uri + // +kubebuilder:validation:Optional + QueryList []*string `json:"queryList,omitempty" tf:"query_list,omitempty"` + + // Mapping of query variable names to values (equivalent to the Pig command: name=[value]). + // +kubebuilder:validation:Optional + ScriptVariables map[string]*string `json:"scriptVariables,omitempty" tf:"script_variables,omitempty"` +} + +type PlacementObservation struct { + + // A cluster UUID generated by the Cloud Dataproc service when the job is submitted. + ClusterUUID *string `json:"clusterUuid,omitempty" tf:"cluster_uuid,omitempty"` +} + +type PlacementParameters struct { + + // The name of the cluster where the job + // will be submitted. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/dataproc/v1beta1.Cluster + // +crossplane:generate:reference:extractor=github.com/upbound/upjet/pkg/resource.ExtractParamPath("name",false) + // +kubebuilder:validation:Optional + ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` + + // Reference to a Cluster in dataproc to populate clusterName. + // +kubebuilder:validation:Optional + ClusterNameRef *v1.Reference `json:"clusterNameRef,omitempty" tf:"-"` + + // Selector for a Cluster in dataproc to populate clusterName. + // +kubebuilder:validation:Optional + ClusterNameSelector *v1.Selector `json:"clusterNameSelector,omitempty" tf:"-"` +} + +type PrestoConfigLoggingConfigObservation struct { +} + +type PrestoConfigLoggingConfigParameters struct { + + // The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. 
Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + // +kubebuilder:validation:Required + DriverLogLevels map[string]*string `json:"driverLogLevels" tf:"driver_log_levels,omitempty"` +} + +type PrestoConfigObservation struct { +} + +type PrestoConfigParameters struct { + + // Presto client tags to attach to this query. + // +kubebuilder:validation:Optional + ClientTags []*string `json:"clientTags,omitempty" tf:"client_tags,omitempty"` + + // Whether to continue executing queries if a query fails. Setting to true can be useful when executing independent parallel queries. Defaults to false. + // +kubebuilder:validation:Optional + ContinueOnFailure *bool `json:"continueOnFailure,omitempty" tf:"continue_on_failure,omitempty"` + + // +kubebuilder:validation:Optional + LoggingConfig []PrestoConfigLoggingConfigParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // The format in which query output will be displayed. See the Presto documentation for supported output formats. + // +kubebuilder:validation:Optional + OutputFormat *string `json:"outputFormat,omitempty" tf:"output_format,omitempty"` + + // A mapping of property names to values. Used to set Presto session properties Equivalent to using the --session flag in the Presto CLI. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // The HCFS URI of the script that contains SQL queries. + // Conflicts with query_list + // +kubebuilder:validation:Optional + QueryFileURI *string `json:"queryFileUri,omitempty" tf:"query_file_uri,omitempty"` + + // The list of SQL queries or statements to execute as part of the job. + // Conflicts with query_file_uri + // +kubebuilder:validation:Optional + QueryList []*string `json:"queryList,omitempty" tf:"query_list,omitempty"` +} + +type PysparkConfigLoggingConfigObservation struct { +} + +type PysparkConfigLoggingConfigParameters struct { + + // The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + // +kubebuilder:validation:Required + DriverLogLevels map[string]*string `json:"driverLogLevels" tf:"driver_log_levels,omitempty"` +} + +type PysparkConfigObservation struct { +} + +type PysparkConfigParameters struct { + + // HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip. + // +kubebuilder:validation:Optional + ArchiveUris []*string `json:"archiveUris,omitempty" tf:"archive_uris,omitempty"` + + // The arguments to pass to the driver. + // +kubebuilder:validation:Optional + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks. + // +kubebuilder:validation:Optional + FileUris []*string `json:"fileUris,omitempty" tf:"file_uris,omitempty"` + + // HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks. + // +kubebuilder:validation:Optional + JarFileUris []*string `json:"jarFileUris,omitempty" tf:"jar_file_uris,omitempty"` + + // +kubebuilder:validation:Optional + LoggingConfig []PysparkConfigLoggingConfigParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // The HCFS URI of the main Python file to use as the driver. Must be a .py file. 
+ // +kubebuilder:validation:Required + MainPythonFileURI *string `json:"mainPythonFileUri" tf:"main_python_file_uri,omitempty"` + + // A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. + // +kubebuilder:validation:Optional + PythonFileUris []*string `json:"pythonFileUris,omitempty" tf:"python_file_uris,omitempty"` +} + +type ReferenceObservation struct { +} + +type ReferenceParameters struct { + + // +kubebuilder:validation:Optional + JobID *string `json:"jobId,omitempty" tf:"job_id,omitempty"` +} + +type SchedulingObservation struct { +} + +type SchedulingParameters struct { + + // Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. + // +kubebuilder:validation:Required + MaxFailuresPerHour *float64 `json:"maxFailuresPerHour" tf:"max_failures_per_hour,omitempty"` + + // Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. + // +kubebuilder:validation:Required + MaxFailuresTotal *float64 `json:"maxFailuresTotal" tf:"max_failures_total,omitempty"` +} + +type SparkConfigLoggingConfigObservation struct { +} + +type SparkConfigLoggingConfigParameters struct { + + // The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + // +kubebuilder:validation:Required + DriverLogLevels map[string]*string `json:"driverLogLevels" tf:"driver_log_levels,omitempty"` +} + +type SparkConfigObservation struct { +} + +type SparkConfigParameters struct { + + // HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip. + // +kubebuilder:validation:Optional + ArchiveUris []*string `json:"archiveUris,omitempty" tf:"archive_uris,omitempty"` + + // The arguments to pass to the driver. + // +kubebuilder:validation:Optional + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks. + // +kubebuilder:validation:Optional + FileUris []*string `json:"fileUris,omitempty" tf:"file_uris,omitempty"` + + // HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks. + // +kubebuilder:validation:Optional + JarFileUris []*string `json:"jarFileUris,omitempty" tf:"jar_file_uris,omitempty"` + + // +kubebuilder:validation:Optional + LoggingConfig []SparkConfigLoggingConfigParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // The class containing the main method of the driver. Must be in a + // provided jar or jar that is already on the classpath. Conflicts with main_jar_file_uri + // +kubebuilder:validation:Optional + MainClass *string `json:"mainClass,omitempty" tf:"main_class,omitempty"` + + // The HCFS URI of jar file containing + // the driver jar. 
Conflicts with main_class + // +kubebuilder:validation:Optional + MainJarFileURI *string `json:"mainJarFileUri,omitempty" tf:"main_jar_file_uri,omitempty"` + + // A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` +} + +type SparksqlConfigLoggingConfigObservation struct { +} + +type SparksqlConfigLoggingConfigParameters struct { + + // The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + // +kubebuilder:validation:Required + DriverLogLevels map[string]*string `json:"driverLogLevels" tf:"driver_log_levels,omitempty"` +} + +type SparksqlConfigObservation struct { +} + +type SparksqlConfigParameters struct { + + // HCFS URIs of jar files to be added to the Spark CLASSPATH. + // +kubebuilder:validation:Optional + JarFileUris []*string `json:"jarFileUris,omitempty" tf:"jar_file_uris,omitempty"` + + // +kubebuilder:validation:Optional + LoggingConfig []SparksqlConfigLoggingConfigParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // The HCFS URI of the script that contains SQL queries. + // Conflicts with query_list + // +kubebuilder:validation:Optional + QueryFileURI *string `json:"queryFileUri,omitempty" tf:"query_file_uri,omitempty"` + + // The list of SQL queries or statements to execute as part of the job. + // Conflicts with query_file_uri + // +kubebuilder:validation:Optional + QueryList []*string `json:"queryList,omitempty" tf:"query_list,omitempty"` + + // Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";). + // +kubebuilder:validation:Optional + ScriptVariables map[string]*string `json:"scriptVariables,omitempty" tf:"script_variables,omitempty"` +} + +type StatusObservation struct { + + // Optional job state details, such as an error description if the state is ERROR. + Details *string `json:"details,omitempty" tf:"details,omitempty"` + + // A state message specifying the overall job state. + State *string `json:"state,omitempty" tf:"state,omitempty"` + + // The time when this state was entered. + StateStartTime *string `json:"stateStartTime,omitempty" tf:"state_start_time,omitempty"` + + // Additional state information, which includes status reported by the agent. + Substate *string `json:"substate,omitempty" tf:"substate,omitempty"` +} + +type StatusParameters struct { +} + +// JobSpec defines the desired state of Job +type JobSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider JobParameters `json:"forProvider"` +} + +// JobStatus defines the observed state of Job. +type JobStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider JobObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// Job is the Schema for the Jobs API. Manages a job resource within a Dataproc cluster. 
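Before the printer columns and kind registration that follow, a hedged Go sketch of how the reference fields defined above (RegionRef and Placement.ClusterNameRef) are intended to be populated; the helper name, the cluster reference, and the Spark main class are illustrative only:

package main

import (
	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

	"github.com/upbound/provider-gcp/apis/dataproc/v1beta1"
)

// newSparkJob builds a Job whose region and placement cluster name are left
// unset and instead resolved by the generated resolvers from the referenced
// Cluster object (hypothetical helper).
func newSparkJob(clusterRef string) *v1beta1.Job {
	mainClass := "org.apache.spark.examples.SparkPi"
	return &v1beta1.Job{
		Spec: v1beta1.JobSpec{
			ForProvider: v1beta1.JobParameters{
				// Region is filled from the referenced Cluster's region parameter.
				RegionRef: &xpv1.Reference{Name: clusterRef},
				Placement: []v1beta1.PlacementParameters{{
					// ClusterName is filled from the referenced Cluster's name parameter.
					ClusterNameRef: &xpv1.Reference{Name: clusterRef},
				}},
				SparkConfig: []v1beta1.SparkConfigParameters{{
					MainClass: &mainClass,
				}},
			},
		},
	}
}

func main() { _ = newSparkJob("example-cluster") }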
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,gcp} +type Job struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec JobSpec `json:"spec"` + Status JobStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// JobList contains a list of Jobs +type JobList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Job `json:"items"` +} + +// Repository type metadata. +var ( + Job_Kind = "Job" + Job_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Job_Kind}.String() + Job_KindAPIVersion = Job_Kind + "." + CRDGroupVersion.String() + Job_GroupVersionKind = CRDGroupVersion.WithKind(Job_Kind) +) + +func init() { + SchemeBuilder.Register(&Job{}, &JobList{}) +} diff --git a/apis/dataproc/v1beta1/zz_workflowtemplate_types.go b/apis/dataproc/v1beta1/zz_workflowtemplate_types.go new file mode 100755 index 000000000..88f4f6cd2 --- /dev/null +++ b/apis/dataproc/v1beta1/zz_workflowtemplate_types.go @@ -0,0 +1,1286 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ClusterSelectorObservation struct { +} + +type ClusterSelectorParameters struct { + + // Required. The cluster labels. Cluster must have all labels to match. + // +kubebuilder:validation:Required + ClusterLabels map[string]*string `json:"clusterLabels" tf:"cluster_labels,omitempty"` + + // Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ * us-central1-f + // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type ConfigAutoscalingConfigObservation struct { +} + +type ConfigAutoscalingConfigParameters struct { + + // Optional. The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ Note that the policy must be in the same project and Dataproc region. 
+ // +kubebuilder:validation:Optional + Policy *string `json:"policy,omitempty" tf:"policy,omitempty"` +} + +type ConfigEncryptionConfigObservation struct { +} + +type ConfigEncryptionConfigParameters struct { + + // Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster. + // +kubebuilder:validation:Optional + GcePdKMSKeyName *string `json:"gcePdKmsKeyName,omitempty" tf:"gce_pd_kms_key_name,omitempty"` +} + +type ConfigEndpointConfigObservation struct { + + // Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true. + HTTPPorts map[string]*string `json:"httpPorts,omitempty" tf:"http_ports,omitempty"` +} + +type ConfigEndpointConfigParameters struct { + + // Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false. + // +kubebuilder:validation:Optional + EnableHTTPPortAccess *bool `json:"enableHttpPortAccess,omitempty" tf:"enable_http_port_access,omitempty"` +} + +type ConfigGceClusterConfigObservation struct { +} + +type ConfigGceClusterConfigParameters struct { + + // Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. + // +kubebuilder:validation:Optional + InternalIPOnly *bool `json:"internalIpOnly,omitempty" tf:"internal_ip_only,omitempty"` + + // The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + // +kubebuilder:validation:Optional + Metadata map[string]*string `json:"metadata,omitempty" tf:"metadata,omitempty"` + + // Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see /regions/global/default*default` + // +kubebuilder:validation:Optional + Network *string `json:"network,omitempty" tf:"network,omitempty"` + + // Optional. Node Group Affinity for sole-tenant clusters. + // +kubebuilder:validation:Optional + NodeGroupAffinity []NodeGroupAffinityParameters `json:"nodeGroupAffinity,omitempty" tf:"node_group_affinity,omitempty"` + + // Optional. The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL + // +kubebuilder:validation:Optional + PrivateIPv6GoogleAccess *string `json:"privateIpv6GoogleAccess,omitempty" tf:"private_ipv6_google_access,omitempty"` + + // Optional. Reservation Affinity for consuming Zonal reservation. + // +kubebuilder:validation:Optional + ReservationAffinity []ReservationAffinityParameters `json:"reservationAffinity,omitempty" tf:"reservation_affinity,omitempty"` + + // Optional. The (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used. + // +kubebuilder:validation:Optional + ServiceAccount *string `json:"serviceAccount,omitempty" tf:"service_account,omitempty"` + + // Optional. The URIs of service account scopes to be included in Compute Engine instances. 
The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control + // +kubebuilder:validation:Optional + ServiceAccountScopes []*string `json:"serviceAccountScopes,omitempty" tf:"service_account_scopes,omitempty"` + + // Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs. Structure defined below. + // +kubebuilder:validation:Optional + ShieldedInstanceConfig []GceClusterConfigShieldedInstanceConfigParameters `json:"shieldedInstanceConfig,omitempty" tf:"shielded_instance_config,omitempty"` + + // Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0 * sub0 + // +kubebuilder:validation:Optional + Subnetwork *string `json:"subnetwork,omitempty" tf:"subnetwork,omitempty"` + + // The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + // +kubebuilder:validation:Optional + Tags []*string `json:"tags,omitempty" tf:"tags,omitempty"` + + // Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ * us-central1-f + // +kubebuilder:validation:Optional + Zone *string `json:"zone,omitempty" tf:"zone,omitempty"` +} + +type ConfigLifecycleConfigObservation struct { + + // Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + IdleStartTime *string `json:"idleStartTime,omitempty" tf:"idle_start_time,omitempty"` +} + +type ConfigLifecycleConfigParameters struct { + + // Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + // +kubebuilder:validation:Optional + AutoDeleteTTL *string `json:"autoDeleteTtl,omitempty" tf:"auto_delete_ttl,omitempty"` + + // Optional. The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + // +kubebuilder:validation:Optional + AutoDeleteTime *string `json:"autoDeleteTime,omitempty" tf:"auto_delete_time,omitempty"` + + // Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). 
+ // +kubebuilder:validation:Optional + IdleDeleteTTL *string `json:"idleDeleteTtl,omitempty" tf:"idle_delete_ttl,omitempty"` +} + +type ConfigMasterConfigObservation struct { + + // Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + InstanceNames []*string `json:"instanceNames,omitempty" tf:"instance_names,omitempty"` + + // Output only. Specifies that this instance group contains preemptible instances. + IsPreemptible *bool `json:"isPreemptible,omitempty" tf:"is_preemptible,omitempty"` + + // Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. + ManagedGroupConfig []ManagedGroupConfigObservation `json:"managedGroupConfig,omitempty" tf:"managed_group_config,omitempty"` +} + +type ConfigMasterConfigParameters struct { + + // Optional. The Compute Engine accelerator configuration for these instances. + // +kubebuilder:validation:Optional + Accelerators []MasterConfigAcceleratorsParameters `json:"accelerators,omitempty" tf:"accelerators,omitempty"` + + // Optional. Disk option config settings. + // +kubebuilder:validation:Optional + DiskConfig []MasterConfigDiskConfigParameters `json:"diskConfig,omitempty" tf:"disk_config,omitempty"` + + // Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/ If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + // +kubebuilder:validation:Optional + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2`. + // +kubebuilder:validation:Optional + MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` + + // Optional. Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + // +kubebuilder:validation:Optional + MinCPUPlatform *string `json:"minCpuPlatform,omitempty" tf:"min_cpu_platform,omitempty"` + + // Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1. + // +kubebuilder:validation:Optional + NumInstances *float64 `json:"numInstances,omitempty" tf:"num_instances,omitempty"` + + // Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE + // +kubebuilder:validation:Optional + Preemptibility *string `json:"preemptibility,omitempty" tf:"preemptibility,omitempty"` +} + +type ConfigSecondaryWorkerConfigObservation struct { + + // Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + InstanceNames []*string `json:"instanceNames,omitempty" tf:"instance_names,omitempty"` + + // Output only. Specifies that this instance group contains preemptible instances. 
+ IsPreemptible *bool `json:"isPreemptible,omitempty" tf:"is_preemptible,omitempty"` + + // Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. + ManagedGroupConfig []SecondaryWorkerConfigManagedGroupConfigObservation `json:"managedGroupConfig,omitempty" tf:"managed_group_config,omitempty"` +} + +type ConfigSecondaryWorkerConfigParameters struct { + + // Optional. The Compute Engine accelerator configuration for these instances. + // +kubebuilder:validation:Optional + Accelerators []SecondaryWorkerConfigAcceleratorsParameters `json:"accelerators,omitempty" tf:"accelerators,omitempty"` + + // Optional. Disk option config settings. + // +kubebuilder:validation:Optional + DiskConfig []SecondaryWorkerConfigDiskConfigParameters `json:"diskConfig,omitempty" tf:"disk_config,omitempty"` + + // Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/ If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + // +kubebuilder:validation:Optional + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2`. + // +kubebuilder:validation:Optional + MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` + + // Optional. Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + // +kubebuilder:validation:Optional + MinCPUPlatform *string `json:"minCpuPlatform,omitempty" tf:"min_cpu_platform,omitempty"` + + // Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1. + // +kubebuilder:validation:Optional + NumInstances *float64 `json:"numInstances,omitempty" tf:"num_instances,omitempty"` + + // Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE + // +kubebuilder:validation:Optional + Preemptibility *string `json:"preemptibility,omitempty" tf:"preemptibility,omitempty"` +} + +type ConfigSecurityConfigObservation struct { +} + +type ConfigSecurityConfigParameters struct { + + // Kerberos related configuration. + // +kubebuilder:validation:Optional + KerberosConfig []SecurityConfigKerberosConfigParameters `json:"kerberosConfig,omitempty" tf:"kerberos_config,omitempty"` +} + +type ConfigSoftwareConfigObservation struct { +} + +type ConfigSoftwareConfigParameters struct { + + // Optional. The version of software inside the cluster. It must be one of the supported (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version. 
+ // +kubebuilder:validation:Optional + ImageVersion *string `json:"imageVersion,omitempty" tf:"image_version,omitempty"` + + // +kubebuilder:validation:Optional + OptionalComponents []*string `json:"optionalComponents,omitempty" tf:"optional_components,omitempty"` + + // Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` +} + +type ConfigWorkerConfigAcceleratorsObservation struct { +} + +type ConfigWorkerConfigAcceleratorsParameters struct { + + // The number of the accelerator cards of this type exposed to this instance. + // +kubebuilder:validation:Optional + AcceleratorCount *float64 `json:"acceleratorCount,omitempty" tf:"accelerator_count,omitempty"` + + // Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80. + // +kubebuilder:validation:Optional + AcceleratorType *string `json:"acceleratorType,omitempty" tf:"accelerator_type,omitempty"` +} + +type ConfigWorkerConfigDiskConfigObservation struct { +} + +type ConfigWorkerConfigDiskConfigParameters struct { + + // Optional. Size in GB of the boot disk (default is 500GB). + // +kubebuilder:validation:Optional + BootDiskSizeGb *float64 `json:"bootDiskSizeGb,omitempty" tf:"boot_disk_size_gb,omitempty"` + + // Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). + // +kubebuilder:validation:Optional + BootDiskType *string `json:"bootDiskType,omitempty" tf:"boot_disk_type,omitempty"` + + // Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. + // +kubebuilder:validation:Optional + NumLocalSsds *float64 `json:"numLocalSsds,omitempty" tf:"num_local_ssds,omitempty"` +} + +type ConfigWorkerConfigObservation struct { + + // Output only. The list of instance names. Dataproc derives the names from cluster_name, num_instances, and the instance group. + InstanceNames []*string `json:"instanceNames,omitempty" tf:"instance_names,omitempty"` + + // Output only. Specifies that this instance group contains preemptible instances. + IsPreemptible *bool `json:"isPreemptible,omitempty" tf:"is_preemptible,omitempty"` + + // Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. + ManagedGroupConfig []WorkerConfigManagedGroupConfigObservation `json:"managedGroupConfig,omitempty" tf:"managed_group_config,omitempty"` +} + +type ConfigWorkerConfigParameters struct { + + // Optional. The Compute Engine accelerator configuration for these instances. + // +kubebuilder:validation:Optional + Accelerators []ConfigWorkerConfigAcceleratorsParameters `json:"accelerators,omitempty" tf:"accelerators,omitempty"` + + // Optional. Disk option config settings. 
+ // +kubebuilder:validation:Optional + DiskConfig []ConfigWorkerConfigDiskConfigParameters `json:"diskConfig,omitempty" tf:"disk_config,omitempty"` + + // Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * https://www.googleapis.com/compute/beta/projects/ If the URI is unspecified, it will be inferred from SoftwareConfig.image_version or the system default. + // +kubebuilder:validation:Optional + Image *string `json:"image,omitempty" tf:"image,omitempty"` + + // Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, n1-standard-2`. + // +kubebuilder:validation:Optional + MachineType *string `json:"machineType,omitempty" tf:"machine_type,omitempty"` + + // Optional. Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + // +kubebuilder:validation:Optional + MinCPUPlatform *string `json:"minCpuPlatform,omitempty" tf:"min_cpu_platform,omitempty"` + + // Optional. The number of VM instances in the instance group. For master instance groups, must be set to 1. + // +kubebuilder:validation:Optional + NumInstances *float64 `json:"numInstances,omitempty" tf:"num_instances,omitempty"` + + // Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed. The default value for secondary instances is PREEMPTIBLE. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE + // +kubebuilder:validation:Optional + Preemptibility *string `json:"preemptibility,omitempty" tf:"preemptibility,omitempty"` +} + +type GceClusterConfigShieldedInstanceConfigObservation struct { +} + +type GceClusterConfigShieldedInstanceConfigParameters struct { + + // Optional. Defines whether instances have Integrity Monitoring enabled. + // +kubebuilder:validation:Optional + EnableIntegrityMonitoring *bool `json:"enableIntegrityMonitoring,omitempty" tf:"enable_integrity_monitoring,omitempty"` + + // Optional. Defines whether instances have Secure Boot enabled. + // +kubebuilder:validation:Optional + EnableSecureBoot *bool `json:"enableSecureBoot,omitempty" tf:"enable_secure_boot,omitempty"` + + // Optional. Defines whether instances have the vTPM enabled. + // +kubebuilder:validation:Optional + EnableVtpm *bool `json:"enableVtpm,omitempty" tf:"enable_vtpm,omitempty"` +} + +type HadoopJobLoggingConfigObservation struct { +} + +type HadoopJobLoggingConfigParameters struct { + + // The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + // +kubebuilder:validation:Optional + DriverLogLevels map[string]*string `json:"driverLogLevels,omitempty" tf:"driver_log_levels,omitempty"` +} + +type HadoopJobObservation struct { +} + +type HadoopJobParameters struct { + + // Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. 
+ // +kubebuilder:validation:Optional + ArchiveUris []*string `json:"archiveUris,omitempty" tf:"archive_uris,omitempty"` + + // Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + // +kubebuilder:validation:Optional + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + // +kubebuilder:validation:Optional + FileUris []*string `json:"fileUris,omitempty" tf:"file_uris,omitempty"` + + // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. + // +kubebuilder:validation:Optional + JarFileUris []*string `json:"jarFileUris,omitempty" tf:"jar_file_uris,omitempty"` + + // Optional. The runtime log config for job execution. + // +kubebuilder:validation:Optional + LoggingConfig []HadoopJobLoggingConfigParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris. + // +kubebuilder:validation:Optional + MainClass *string `json:"mainClass,omitempty" tf:"main_class,omitempty"` + + // The HCFS URI of the jar file that contains the main class. + // +kubebuilder:validation:Optional + MainJarFileURI *string `json:"mainJarFileUri,omitempty" tf:"main_jar_file_uri,omitempty"` + + // Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` +} + +type HiveJobObservation struct { +} + +type HiveJobParameters struct { + + // Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + // +kubebuilder:validation:Optional + ContinueOnFailure *bool `json:"continueOnFailure,omitempty" tf:"continue_on_failure,omitempty"` + + // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. + // +kubebuilder:validation:Optional + JarFileUris []*string `json:"jarFileUris,omitempty" tf:"jar_file_uris,omitempty"` + + // Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // The HCFS URI of the script that contains SQL queries. + // +kubebuilder:validation:Optional + QueryFileURI *string `json:"queryFileUri,omitempty" tf:"query_file_uri,omitempty"` + + // A list of queries. + // +kubebuilder:validation:Optional + QueryList []QueryListParameters `json:"queryList,omitempty" tf:"query_list,omitempty"` + + // Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";). + // +kubebuilder:validation:Optional + ScriptVariables map[string]*string `json:"scriptVariables,omitempty" tf:"script_variables,omitempty"` +} + +type InitializationActionsObservation struct { +} + +type InitializationActionsParameters struct { + + // Required. Cloud Storage URI of executable file. 
+ // +kubebuilder:validation:Optional + ExecutableFile *string `json:"executableFile,omitempty" tf:"executable_file,omitempty"` + + // Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + // +kubebuilder:validation:Optional + ExecutionTimeout *string `json:"executionTimeout,omitempty" tf:"execution_timeout,omitempty"` +} + +type JobsObservation struct { +} + +type JobsParameters struct { + + // Optional. Job is a Hadoop job. + // +kubebuilder:validation:Optional + HadoopJob []HadoopJobParameters `json:"hadoopJob,omitempty" tf:"hadoop_job,omitempty"` + + // Optional. Job is a Hive job. + // +kubebuilder:validation:Optional + HiveJob []HiveJobParameters `json:"hiveJob,omitempty" tf:"hive_job,omitempty"` + + // Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: {0,63} No more than 32 labels can be associated with a given job. + // +kubebuilder:validation:Optional + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // Optional. Job is a Pig job. + // +kubebuilder:validation:Optional + PigJob []PigJobParameters `json:"pigJob,omitempty" tf:"pig_job,omitempty"` + + // Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow. + // +kubebuilder:validation:Optional + PrerequisiteStepIds []*string `json:"prerequisiteStepIds,omitempty" tf:"prerequisite_step_ids,omitempty"` + + // Optional. Job is a Presto job. + // +kubebuilder:validation:Optional + PrestoJob []PrestoJobParameters `json:"prestoJob,omitempty" tf:"presto_job,omitempty"` + + // Optional. Job is a PySpark job. + // +kubebuilder:validation:Optional + PysparkJob []PysparkJobParameters `json:"pysparkJob,omitempty" tf:"pyspark_job,omitempty"` + + // Optional. Job scheduling configuration. + // +kubebuilder:validation:Optional + Scheduling []JobsSchedulingParameters `json:"scheduling,omitempty" tf:"scheduling,omitempty"` + + // Optional. Job is a Spark job. + // +kubebuilder:validation:Optional + SparkJob []SparkJobParameters `json:"sparkJob,omitempty" tf:"spark_job,omitempty"` + + // Optional. Job is a SparkR job. + // +kubebuilder:validation:Optional + SparkRJob []SparkRJobParameters `json:"sparkRJob,omitempty" tf:"spark_r_job,omitempty"` + + // Optional. Job is a SparkSql job. + // +kubebuilder:validation:Optional + SparkSQLJob []SparkSQLJobParameters `json:"sparkSqlJob,omitempty" tf:"spark_sql_job,omitempty"` + + // Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. + // +kubebuilder:validation:Required + StepID *string `json:"stepId" tf:"step_id,omitempty"` +} + +type JobsSchedulingObservation struct { +} + +type JobsSchedulingParameters struct { + + // Optional. 
Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10. + // +kubebuilder:validation:Optional + MaxFailuresPerHour *float64 `json:"maxFailuresPerHour,omitempty" tf:"max_failures_per_hour,omitempty"` + + // Optional. Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240 + // +kubebuilder:validation:Optional + MaxFailuresTotal *float64 `json:"maxFailuresTotal,omitempty" tf:"max_failures_total,omitempty"` +} + +type ManagedClusterConfigObservation struct { + + // Optional. Port/endpoint configuration for this cluster + // +kubebuilder:validation:Optional + EndpointConfig []ConfigEndpointConfigObservation `json:"endpointConfig,omitempty" tf:"endpoint_config,omitempty"` + + // Optional. Lifecycle setting for the cluster. + // +kubebuilder:validation:Optional + LifecycleConfig []ConfigLifecycleConfigObservation `json:"lifecycleConfig,omitempty" tf:"lifecycle_config,omitempty"` + + // Optional. The Compute Engine config settings for additional worker instances in a cluster. + // +kubebuilder:validation:Optional + MasterConfig []ConfigMasterConfigObservation `json:"masterConfig,omitempty" tf:"master_config,omitempty"` + + // Optional. The Compute Engine config settings for additional worker instances in a cluster. + // +kubebuilder:validation:Optional + SecondaryWorkerConfig []ConfigSecondaryWorkerConfigObservation `json:"secondaryWorkerConfig,omitempty" tf:"secondary_worker_config,omitempty"` + + // Optional. The Compute Engine config settings for additional worker instances in a cluster. + // +kubebuilder:validation:Optional + WorkerConfig []ConfigWorkerConfigObservation `json:"workerConfig,omitempty" tf:"worker_config,omitempty"` +} + +type ManagedClusterConfigParameters struct { + + // Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset. + // +kubebuilder:validation:Optional + AutoscalingConfig []ConfigAutoscalingConfigParameters `json:"autoscalingConfig,omitempty" tf:"autoscaling_config,omitempty"` + + // Optional. Encryption settings for the cluster. + // +kubebuilder:validation:Optional + EncryptionConfig []ConfigEncryptionConfigParameters `json:"encryptionConfig,omitempty" tf:"encryption_config,omitempty"` + + // Optional. Port/endpoint configuration for this cluster + // +kubebuilder:validation:Optional + EndpointConfig []ConfigEndpointConfigParameters `json:"endpointConfig,omitempty" tf:"endpoint_config,omitempty"` + + // Optional. The shared Compute Engine config settings for all instances in a cluster. + // +kubebuilder:validation:Optional + GceClusterConfig []ConfigGceClusterConfigParameters `json:"gceClusterConfig,omitempty" tf:"gce_cluster_config,omitempty"` + + // Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if ; then ... master specific actions ... else ... worker specific actions ... 
fi + // +kubebuilder:validation:Optional + InitializationActions []InitializationActionsParameters `json:"initializationActions,omitempty" tf:"initialization_actions,omitempty"` + + // Optional. Lifecycle setting for the cluster. + // +kubebuilder:validation:Optional + LifecycleConfig []ConfigLifecycleConfigParameters `json:"lifecycleConfig,omitempty" tf:"lifecycle_config,omitempty"` + + // Optional. The Compute Engine config settings for additional worker instances in a cluster. + // +kubebuilder:validation:Optional + MasterConfig []ConfigMasterConfigParameters `json:"masterConfig,omitempty" tf:"master_config,omitempty"` + + // Optional. The Compute Engine config settings for additional worker instances in a cluster. + // +kubebuilder:validation:Optional + SecondaryWorkerConfig []ConfigSecondaryWorkerConfigParameters `json:"secondaryWorkerConfig,omitempty" tf:"secondary_worker_config,omitempty"` + + // Optional. Security settings for the cluster. + // +kubebuilder:validation:Optional + SecurityConfig []ConfigSecurityConfigParameters `json:"securityConfig,omitempty" tf:"security_config,omitempty"` + + // Optional. The config settings for software inside the cluster. + // +kubebuilder:validation:Optional + SoftwareConfig []ConfigSoftwareConfigParameters `json:"softwareConfig,omitempty" tf:"software_config,omitempty"` + + // Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + // +kubebuilder:validation:Optional + StagingBucket *string `json:"stagingBucket,omitempty" tf:"staging_bucket,omitempty"` + + // Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. + // +kubebuilder:validation:Optional + TempBucket *string `json:"tempBucket,omitempty" tf:"temp_bucket,omitempty"` + + // Optional. The Compute Engine config settings for additional worker instances in a cluster. + // +kubebuilder:validation:Optional + WorkerConfig []ConfigWorkerConfigParameters `json:"workerConfig,omitempty" tf:"worker_config,omitempty"` +} + +type ManagedClusterObservation struct { + + // Required. The cluster configuration. + // +kubebuilder:validation:Required + Config []ManagedClusterConfigObservation `json:"config,omitempty" tf:"config,omitempty"` +} + +type ManagedClusterParameters struct { + + // Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters. + // +kubebuilder:validation:Required + ClusterName *string `json:"clusterName" tf:"cluster_name,omitempty"` + + // Required. The cluster configuration. 
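The managed-cluster blocks nest in the same fashion: a cluster name plus a config that can carry buckets, software settings, and initialization actions. A sketch of a small managed cluster with one initialization action, reusing the ptr helper from the earlier sketch; the bucket and script names are placeholders:

// exampleManagedCluster describes a workflow-managed cluster with one bootstrap script.
var exampleManagedCluster = ManagedClusterParameters{
	ClusterName: ptr("wf-cluster"), // placeholder prefix; a random suffix is appended
	Config: []ManagedClusterConfigParameters{{
		StagingBucket: ptr("my-staging-bucket"), // placeholder bucket
		InitializationActions: []InitializationActionsParameters{{
			ExecutableFile:   ptr("gs://my-bucket/scripts/bootstrap.sh"), // placeholder script
			ExecutionTimeout: ptr("600s"),                                // JSON duration format
		}},
	}},
}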
+ // +kubebuilder:validation:Required + Config []ManagedClusterConfigParameters `json:"config" tf:"config,omitempty"` + + // Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster. + // +kubebuilder:validation:Optional + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` +} + +type ManagedGroupConfigObservation struct { + + // Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id} + InstanceGroupManagerName *string `json:"instanceGroupManagerName,omitempty" tf:"instance_group_manager_name,omitempty"` + + // Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id} + InstanceTemplateName *string `json:"instanceTemplateName,omitempty" tf:"instance_template_name,omitempty"` +} + +type ManagedGroupConfigParameters struct { +} + +type MasterConfigAcceleratorsObservation struct { +} + +type MasterConfigAcceleratorsParameters struct { + + // The number of the accelerator cards of this type exposed to this instance. + // +kubebuilder:validation:Optional + AcceleratorCount *float64 `json:"acceleratorCount,omitempty" tf:"accelerator_count,omitempty"` + + // Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80. + // +kubebuilder:validation:Optional + AcceleratorType *string `json:"acceleratorType,omitempty" tf:"accelerator_type,omitempty"` +} + +type MasterConfigDiskConfigObservation struct { +} + +type MasterConfigDiskConfigParameters struct { + + // Optional. Size in GB of the boot disk (default is 500GB). + // +kubebuilder:validation:Optional + BootDiskSizeGb *float64 `json:"bootDiskSizeGb,omitempty" tf:"boot_disk_size_gb,omitempty"` + + // Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). + // +kubebuilder:validation:Optional + BootDiskType *string `json:"bootDiskType,omitempty" tf:"boot_disk_type,omitempty"` + + // Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. 
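A short sketch of the disk and accelerator leaf blocks, again with the ptr helper from above; the pd-ssd and nvidia-tesla-k80 values are the short names already cited in the field comments:

// exampleMasterDisk and exampleMasterGPU show the leaf blocks that plug into a master config.
var exampleMasterDisk = MasterConfigDiskConfigParameters{
	BootDiskSizeGb: ptr(100.0),
	BootDiskType:   ptr("pd-ssd"),
}

var exampleMasterGPU = MasterConfigAcceleratorsParameters{
	AcceleratorCount: ptr(1.0),
	AcceleratorType:  ptr("nvidia-tesla-k80"),
}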
+ // +kubebuilder:validation:Optional + NumLocalSsds *float64 `json:"numLocalSsds,omitempty" tf:"num_local_ssds,omitempty"` +} + +type NodeGroupAffinityObservation struct { +} + +type NodeGroupAffinityParameters struct { + + // Required. The URI of a sole-tenant /zones/us-central1-a/nodeGroups/node-group-1*node-group-1` + // +kubebuilder:validation:Required + NodeGroup *string `json:"nodeGroup" tf:"node_group,omitempty"` +} + +type ParametersObservation struct { +} + +type ParametersParameters struct { + + // Optional. Brief description of the parameter. Must not exceed 1024 characters. + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a .sparkJob.args + // +kubebuilder:validation:Required + Fields []*string `json:"fields" tf:"fields,omitempty"` + + // Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters. + // +kubebuilder:validation:Required + Name *string `json:"name" tf:"name,omitempty"` + + // Optional. Validation rules to be applied to this parameter's value. + // +kubebuilder:validation:Optional + Validation []ValidationParameters `json:"validation,omitempty" tf:"validation,omitempty"` +} + +type PigJobLoggingConfigObservation struct { +} + +type PigJobLoggingConfigParameters struct { + + // The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + // +kubebuilder:validation:Optional + DriverLogLevels map[string]*string `json:"driverLogLevels,omitempty" tf:"driver_log_levels,omitempty"` +} + +type PigJobObservation struct { +} + +type PigJobParameters struct { + + // Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + // +kubebuilder:validation:Optional + ContinueOnFailure *bool `json:"continueOnFailure,omitempty" tf:"continue_on_failure,omitempty"` + + // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. + // +kubebuilder:validation:Optional + JarFileUris []*string `json:"jarFileUris,omitempty" tf:"jar_file_uris,omitempty"` + + // Optional. The runtime log config for job execution. + // +kubebuilder:validation:Optional + LoggingConfig []PigJobLoggingConfigParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // The HCFS URI of the script that contains SQL queries. + // +kubebuilder:validation:Optional + QueryFileURI *string `json:"queryFileUri,omitempty" tf:"query_file_uri,omitempty"` + + // A list of queries. + // +kubebuilder:validation:Optional + QueryList []PigJobQueryListParameters `json:"queryList,omitempty" tf:"query_list,omitempty"` + + // Optional. 
Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";). + // +kubebuilder:validation:Optional + ScriptVariables map[string]*string `json:"scriptVariables,omitempty" tf:"script_variables,omitempty"` +} + +type PigJobQueryListObservation struct { +} + +type PigJobQueryListParameters struct { + + // Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } } + // +kubebuilder:validation:Required + Queries []*string `json:"queries" tf:"queries,omitempty"` +} + +type PrestoJobLoggingConfigObservation struct { +} + +type PrestoJobLoggingConfigParameters struct { + + // The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + // +kubebuilder:validation:Optional + DriverLogLevels map[string]*string `json:"driverLogLevels,omitempty" tf:"driver_log_levels,omitempty"` +} + +type PrestoJobObservation struct { +} + +type PrestoJobParameters struct { + + // Optional. Presto client tags to attach to this query + // +kubebuilder:validation:Optional + ClientTags []*string `json:"clientTags,omitempty" tf:"client_tags,omitempty"` + + // Optional. Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. + // +kubebuilder:validation:Optional + ContinueOnFailure *bool `json:"continueOnFailure,omitempty" tf:"continue_on_failure,omitempty"` + + // Optional. The runtime log config for job execution. + // +kubebuilder:validation:Optional + LoggingConfig []PrestoJobLoggingConfigParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats + // +kubebuilder:validation:Optional + OutputFormat *string `json:"outputFormat,omitempty" tf:"output_format,omitempty"` + + // Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // The HCFS URI of the script that contains SQL queries. + // +kubebuilder:validation:Optional + QueryFileURI *string `json:"queryFileUri,omitempty" tf:"query_file_uri,omitempty"` + + // A list of queries. + // +kubebuilder:validation:Optional + QueryList []PrestoJobQueryListParameters `json:"queryList,omitempty" tf:"query_list,omitempty"` +} + +type PrestoJobQueryListObservation struct { +} + +type PrestoJobQueryListParameters struct { + + // Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } } + // +kubebuilder:validation:Required + Queries []*string `json:"queries" tf:"queries,omitempty"` +} + +type PysparkJobLoggingConfigObservation struct { +} + +type PysparkJobLoggingConfigParameters struct { + + // The per-package log levels for the driver. 
This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + // +kubebuilder:validation:Optional + DriverLogLevels map[string]*string `json:"driverLogLevels,omitempty" tf:"driver_log_levels,omitempty"` +} + +type PysparkJobObservation struct { +} + +type PysparkJobParameters struct { + + // Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + // +kubebuilder:validation:Optional + ArchiveUris []*string `json:"archiveUris,omitempty" tf:"archive_uris,omitempty"` + + // Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + // +kubebuilder:validation:Optional + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + // +kubebuilder:validation:Optional + FileUris []*string `json:"fileUris,omitempty" tf:"file_uris,omitempty"` + + // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. + // +kubebuilder:validation:Optional + JarFileUris []*string `json:"jarFileUris,omitempty" tf:"jar_file_uris,omitempty"` + + // Optional. The runtime log config for job execution. + // +kubebuilder:validation:Optional + LoggingConfig []PysparkJobLoggingConfigParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file. + // +kubebuilder:validation:Required + MainPythonFileURI *string `json:"mainPythonFileUri" tf:"main_python_file_uri,omitempty"` + + // Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. + // +kubebuilder:validation:Optional + PythonFileUris []*string `json:"pythonFileUris,omitempty" tf:"python_file_uris,omitempty"` +} + +type QueryListObservation struct { +} + +type QueryListParameters struct { + + // Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } } + // +kubebuilder:validation:Required + Queries []*string `json:"queries" tf:"queries,omitempty"` +} + +type RegexObservation struct { +} + +type RegexParameters struct { + + // Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient). + // +kubebuilder:validation:Required + Regexes []*string `json:"regexes" tf:"regexes,omitempty"` +} + +type ReservationAffinityObservation struct { +} + +type ReservationAffinityParameters struct { + + // Optional. 
Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION + // +kubebuilder:validation:Optional + ConsumeReservationType *string `json:"consumeReservationType,omitempty" tf:"consume_reservation_type,omitempty"` + + // Optional. Corresponds to the label key of reservation resource. + // +kubebuilder:validation:Optional + Key *string `json:"key,omitempty" tf:"key,omitempty"` + + // Required. List of allowed values for the parameter. + // +kubebuilder:validation:Optional + Values []*string `json:"values,omitempty" tf:"values,omitempty"` +} + +type SecondaryWorkerConfigAcceleratorsObservation struct { +} + +type SecondaryWorkerConfigAcceleratorsParameters struct { + + // The number of the accelerator cards of this type exposed to this instance. + // +kubebuilder:validation:Optional + AcceleratorCount *float64 `json:"acceleratorCount,omitempty" tf:"accelerator_count,omitempty"` + + // Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80. + // +kubebuilder:validation:Optional + AcceleratorType *string `json:"acceleratorType,omitempty" tf:"accelerator_type,omitempty"` +} + +type SecondaryWorkerConfigDiskConfigObservation struct { +} + +type SecondaryWorkerConfigDiskConfigParameters struct { + + // Optional. Size in GB of the boot disk (default is 500GB). + // +kubebuilder:validation:Optional + BootDiskSizeGb *float64 `json:"bootDiskSizeGb,omitempty" tf:"boot_disk_size_gb,omitempty"` + + // Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). + // +kubebuilder:validation:Optional + BootDiskType *string `json:"bootDiskType,omitempty" tf:"boot_disk_type,omitempty"` + + // Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. + // +kubebuilder:validation:Optional + NumLocalSsds *float64 `json:"numLocalSsds,omitempty" tf:"num_local_ssds,omitempty"` +} + +type SecondaryWorkerConfigManagedGroupConfigObservation struct { + + // Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id} + InstanceGroupManagerName *string `json:"instanceGroupManagerName,omitempty" tf:"instance_group_manager_name,omitempty"` + + // Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. 
* For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id} + InstanceTemplateName *string `json:"instanceTemplateName,omitempty" tf:"instance_template_name,omitempty"` +} + +type SecondaryWorkerConfigManagedGroupConfigParameters struct { +} + +type SecurityConfigKerberosConfigObservation struct { +} + +type SecurityConfigKerberosConfigParameters struct { + + // Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship. + // +kubebuilder:validation:Optional + CrossRealmTrustAdminServer *string `json:"crossRealmTrustAdminServer,omitempty" tf:"cross_realm_trust_admin_server,omitempty"` + + // Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship. + // +kubebuilder:validation:Optional + CrossRealmTrustKdc *string `json:"crossRealmTrustKdc,omitempty" tf:"cross_realm_trust_kdc,omitempty"` + + // Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust. + // +kubebuilder:validation:Optional + CrossRealmTrustRealm *string `json:"crossRealmTrustRealm,omitempty" tf:"cross_realm_trust_realm,omitempty"` + + // Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship. + // +kubebuilder:validation:Optional + CrossRealmTrustSharedPassword *string `json:"crossRealmTrustSharedPassword,omitempty" tf:"cross_realm_trust_shared_password,omitempty"` + + // Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster. + // +kubebuilder:validation:Optional + EnableKerberos *bool `json:"enableKerberos,omitempty" tf:"enable_kerberos,omitempty"` + + // Optional. The uri of the KMS key used to encrypt various sensitive files. + // +kubebuilder:validation:Optional + KMSKey *string `json:"kmsKey,omitempty" tf:"kms_key,omitempty"` + + // Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database. + // +kubebuilder:validation:Optional + KdcDBKey *string `json:"kdcDbKey,omitempty" tf:"kdc_db_key,omitempty"` + + // Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc. + // +kubebuilder:validation:Optional + KeyPassword *string `json:"keyPassword,omitempty" tf:"key_password,omitempty"` + + // Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. + // +kubebuilder:validation:Optional + Keystore *string `json:"keystore,omitempty" tf:"keystore,omitempty"` + + // Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc. + // +kubebuilder:validation:Optional + KeystorePassword *string `json:"keystorePassword,omitempty" tf:"keystore_password,omitempty"` + + // Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm. 
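The Kerberos block follows the pattern these comments describe: the cluster is Kerberized by setting enableKerberos and pointing the config at KMS-encrypted secret files in Cloud Storage. A hedged sketch with placeholder resource names, reusing the ptr helper from the earlier sketch:

// exampleKerberos enables Kerberos with a KMS key and an encrypted KDC database key.
var exampleKerberos = SecurityConfigKerberosConfigParameters{
	EnableKerberos: ptr(true),
	KMSKey:         ptr("projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key"), // placeholder KMS key
	KdcDBKey:       ptr("gs://my-bucket/secrets/kdc-db-key.encrypted"),                             // placeholder object
}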
+ // +kubebuilder:validation:Optional + Realm *string `json:"realm,omitempty" tf:"realm,omitempty"` + + // Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password. + // +kubebuilder:validation:Optional + RootPrincipalPassword *string `json:"rootPrincipalPassword,omitempty" tf:"root_principal_password,omitempty"` + + // Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used. + // +kubebuilder:validation:Optional + TgtLifetimeHours *float64 `json:"tgtLifetimeHours,omitempty" tf:"tgt_lifetime_hours,omitempty"` + + // Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. + // +kubebuilder:validation:Optional + Truststore *string `json:"truststore,omitempty" tf:"truststore,omitempty"` + + // Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc. + // +kubebuilder:validation:Optional + TruststorePassword *string `json:"truststorePassword,omitempty" tf:"truststore_password,omitempty"` +} + +type SparkJobLoggingConfigObservation struct { +} + +type SparkJobLoggingConfigParameters struct { + + // The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + // +kubebuilder:validation:Optional + DriverLogLevels map[string]*string `json:"driverLogLevels,omitempty" tf:"driver_log_levels,omitempty"` +} + +type SparkJobObservation struct { +} + +type SparkJobParameters struct { + + // Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + // +kubebuilder:validation:Optional + ArchiveUris []*string `json:"archiveUris,omitempty" tf:"archive_uris,omitempty"` + + // Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + // +kubebuilder:validation:Optional + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + // +kubebuilder:validation:Optional + FileUris []*string `json:"fileUris,omitempty" tf:"file_uris,omitempty"` + + // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. + // +kubebuilder:validation:Optional + JarFileUris []*string `json:"jarFileUris,omitempty" tf:"jar_file_uris,omitempty"` + + // Optional. The runtime log config for job execution. + // +kubebuilder:validation:Optional + LoggingConfig []SparkJobLoggingConfigParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris. + // +kubebuilder:validation:Optional + MainClass *string `json:"mainClass,omitempty" tf:"main_class,omitempty"` + + // The HCFS URI of the jar file that contains the main class. + // +kubebuilder:validation:Optional + MainJarFileURI *string `json:"mainJarFileUri,omitempty" tf:"main_jar_file_uri,omitempty"` + + // Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. 
Properties that conflict with values set by the Dataproc API may be overwritten. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` +} + +type SparkRJobLoggingConfigObservation struct { +} + +type SparkRJobLoggingConfigParameters struct { + + // The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + // +kubebuilder:validation:Optional + DriverLogLevels map[string]*string `json:"driverLogLevels,omitempty" tf:"driver_log_levels,omitempty"` +} + +type SparkRJobObservation struct { +} + +type SparkRJobParameters struct { + + // Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. + // +kubebuilder:validation:Optional + ArchiveUris []*string `json:"archiveUris,omitempty" tf:"archive_uris,omitempty"` + + // Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. + // +kubebuilder:validation:Optional + Args []*string `json:"args,omitempty" tf:"args,omitempty"` + + // Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. + // +kubebuilder:validation:Optional + FileUris []*string `json:"fileUris,omitempty" tf:"file_uris,omitempty"` + + // Optional. The runtime log config for job execution. + // +kubebuilder:validation:Optional + LoggingConfig []SparkRJobLoggingConfigParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // Required. The HCFS URI of the main R file to use as the driver. Must be a .R file. + // +kubebuilder:validation:Required + MainRFileURI *string `json:"mainRFileUri" tf:"main_r_file_uri,omitempty"` + + // Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` +} + +type SparkSQLJobLoggingConfigObservation struct { +} + +type SparkSQLJobLoggingConfigParameters struct { + + // The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + // +kubebuilder:validation:Optional + DriverLogLevels map[string]*string `json:"driverLogLevels,omitempty" tf:"driver_log_levels,omitempty"` +} + +type SparkSQLJobObservation struct { +} + +type SparkSQLJobParameters struct { + + // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. + // +kubebuilder:validation:Optional + JarFileUris []*string `json:"jarFileUris,omitempty" tf:"jar_file_uris,omitempty"` + + // Optional. The runtime log config for job execution. + // +kubebuilder:validation:Optional + LoggingConfig []SparkSQLJobLoggingConfigParameters `json:"loggingConfig,omitempty" tf:"logging_config,omitempty"` + + // Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten. + // +kubebuilder:validation:Optional + Properties map[string]*string `json:"properties,omitempty" tf:"properties,omitempty"` + + // The HCFS URI of the script that contains SQL queries. 
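This Spark block mirrors the SparkPi example used in examples/dataproc/job.yaml later in this patch. A sketch with those same values, reusing the ptr helper from the earlier sketch:

// exampleSparkStep runs the stock SparkPi example shipped with the Dataproc image.
var exampleSparkStep = SparkJobParameters{
	MainClass:   ptr("org.apache.spark.examples.SparkPi"),
	JarFileUris: []*string{ptr("file:///usr/lib/spark/examples/jars/spark-examples.jar")},
	Args:        []*string{ptr("1000")},
	Properties:  map[string]*string{"spark.logConf": ptr("true")},
	LoggingConfig: []SparkJobLoggingConfigParameters{{
		DriverLogLevels: map[string]*string{"root": ptr("INFO")},
	}},
}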
+ // +kubebuilder:validation:Optional + QueryFileURI *string `json:"queryFileUri,omitempty" tf:"query_file_uri,omitempty"` + + // A list of queries. + // +kubebuilder:validation:Optional + QueryList []SparkSQLJobQueryListParameters `json:"queryList,omitempty" tf:"query_list,omitempty"` + + // Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";). + // +kubebuilder:validation:Optional + ScriptVariables map[string]*string `json:"scriptVariables,omitempty" tf:"script_variables,omitempty"` +} + +type SparkSQLJobQueryListObservation struct { +} + +type SparkSQLJobQueryListParameters struct { + + // Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { "queryList": { "queries": } } + // +kubebuilder:validation:Required + Queries []*string `json:"queries" tf:"queries,omitempty"` +} + +type ValidationObservation struct { +} + +type ValidationParameters struct { + + // Validation based on regular expressions. + // +kubebuilder:validation:Optional + Regex []RegexParameters `json:"regex,omitempty" tf:"regex,omitempty"` + + // Required. List of allowed values for the parameter. + // +kubebuilder:validation:Optional + Values []ValuesParameters `json:"values,omitempty" tf:"values,omitempty"` +} + +type ValuesObservation struct { +} + +type ValuesParameters struct { + + // Required. List of allowed values for the parameter. + // +kubebuilder:validation:Required + Values []*string `json:"values" tf:"values,omitempty"` +} + +type WorkerConfigManagedGroupConfigObservation struct { + + // Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id} + InstanceGroupManagerName *string `json:"instanceGroupManagerName,omitempty" tf:"instance_group_manager_name,omitempty"` + + // Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For projects.regions.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} * For projects.locations.workflowTemplates, the resource name of the template has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id} + InstanceTemplateName *string `json:"instanceTemplateName,omitempty" tf:"instance_template_name,omitempty"` +} + +type WorkerConfigManagedGroupConfigParameters struct { +} + +type WorkflowTemplateObservation struct { + + // Output only. The time template was created. + CreateTime *string `json:"createTime,omitempty" tf:"create_time,omitempty"` + + // an identifier for the resource with format projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}} + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Required. WorkflowTemplate scheduling information. 
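Template parameters tie a name to one or more field paths and, optionally, to validation rules built from the Regex and Values blocks above. A sketch reusing the ptr helper; the field path and regular expression are illustrative only:

// exampleParameter substitutes the managed cluster name when the template is instantiated.
var exampleParameter = ParametersParameters{
	Name:        ptr("CLUSTER_NAME"),
	Description: ptr("Name of the managed cluster the template creates."),
	Fields:      []*string{ptr("placement.managedCluster.clusterName")}, // illustrative field path
	Validation: []ValidationParameters{{
		Regex: []RegexParameters{{
			Regexes: []*string{ptr("[a-z][a-z0-9-]{1,34}")}, // roughly matches the cluster-name rules above
		}},
	}},
}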
+ // +kubebuilder:validation:Required + Placement []WorkflowTemplatePlacementObservation `json:"placement,omitempty" tf:"placement,omitempty"` + + // Output only. The time template was last updated. + UpdateTime *string `json:"updateTime,omitempty" tf:"update_time,omitempty"` +} + +type WorkflowTemplateParameters struct { + + // (Beta only) Optional. Timeout duration for the DAG of jobs. You can use "s", "m", "h", and "d" suffixes for second, minute, hour, and day duration values, respectively. The timeout duration must be from 10 minutes ("10m") to 24 hours ("24h" or "1d"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a (/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), the cluster is deleted. + // +kubebuilder:validation:Optional + DagTimeout *string `json:"dagTimeout,omitempty" tf:"dag_timeout,omitempty"` + + // Required. The Directed Acyclic Graph of Jobs to submit. + // +kubebuilder:validation:Required + Jobs []JobsParameters `json:"jobs" tf:"jobs,omitempty"` + + // Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: {0,63} No more than 32 labels can be associated with a given cluster. + // +kubebuilder:validation:Optional + Labels map[string]*string `json:"labels,omitempty" tf:"labels,omitempty"` + + // The location for the resource + // +kubebuilder:validation:Required + Location *string `json:"location" tf:"location,omitempty"` + + // Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated. + // +kubebuilder:validation:Optional + Parameters []ParametersParameters `json:"parameters,omitempty" tf:"parameters,omitempty"` + + // Required. WorkflowTemplate scheduling information. + // +kubebuilder:validation:Required + Placement []WorkflowTemplatePlacementParameters `json:"placement" tf:"placement,omitempty"` + + // The project for the resource + // +kubebuilder:validation:Optional + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // Optional. Used to perform a consistent read-modify-write. This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request. + // +kubebuilder:validation:Optional + Version *float64 `json:"version,omitempty" tf:"version,omitempty"` +} + +type WorkflowTemplatePlacementObservation struct { + + // A cluster that is managed by the workflow. + // +kubebuilder:validation:Optional + ManagedCluster []ManagedClusterObservation `json:"managedCluster,omitempty" tf:"managed_cluster,omitempty"` +} + +type WorkflowTemplatePlacementParameters struct { + + // Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted. 
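Everything converges in WorkflowTemplateParameters: jobs keyed by stepId, ordered through prerequisiteStepIds, and a placement that either selects an existing cluster or describes a managed one. A sketch that reuses the exampleSparkStep and exampleManagedCluster values from the earlier sketches; the step ids, timeout, and query URI are placeholders:

// exampleTemplate runs a Spark step followed by a Presto step on a workflow-managed cluster.
var exampleTemplate = WorkflowTemplateParameters{
	Location:   ptr("us-central1"),
	DagTimeout: ptr("1h"), // must be between 10m and 24h
	Jobs: []JobsParameters{
		{
			StepID:   ptr("compute-pi"),
			SparkJob: []SparkJobParameters{exampleSparkStep},
		},
		{
			StepID:              ptr("report"),
			PrerequisiteStepIds: []*string{ptr("compute-pi")}, // runs only after compute-pi
			PrestoJob: []PrestoJobParameters{{
				QueryFileURI: ptr("gs://my-bucket/queries/report.sql"), // placeholder query
			}},
		},
	},
	Placement: []WorkflowTemplatePlacementParameters{{
		ManagedCluster: []ManagedClusterParameters{exampleManagedCluster},
	}},
}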
+ // +kubebuilder:validation:Optional + ClusterSelector []ClusterSelectorParameters `json:"clusterSelector,omitempty" tf:"cluster_selector,omitempty"` + + // A cluster that is managed by the workflow. + // +kubebuilder:validation:Optional + ManagedCluster []ManagedClusterParameters `json:"managedCluster,omitempty" tf:"managed_cluster,omitempty"` +} + +// WorkflowTemplateSpec defines the desired state of WorkflowTemplate +type WorkflowTemplateSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider WorkflowTemplateParameters `json:"forProvider"` +} + +// WorkflowTemplateStatus defines the observed state of WorkflowTemplate. +type WorkflowTemplateStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider WorkflowTemplateObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true + +// WorkflowTemplate is the Schema for the WorkflowTemplates API. A Workflow Template is a reusable workflow configuration. +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,gcp} +type WorkflowTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec WorkflowTemplateSpec `json:"spec"` + Status WorkflowTemplateStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// WorkflowTemplateList contains a list of WorkflowTemplates +type WorkflowTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []WorkflowTemplate `json:"items"` +} + +// Repository type metadata. +var ( + WorkflowTemplate_Kind = "WorkflowTemplate" + WorkflowTemplate_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: WorkflowTemplate_Kind}.String() + WorkflowTemplate_KindAPIVersion = WorkflowTemplate_Kind + "." 
+ CRDGroupVersion.String() + WorkflowTemplate_GroupVersionKind = CRDGroupVersion.WithKind(WorkflowTemplate_Kind) +) + +func init() { + SchemeBuilder.Register(&WorkflowTemplate{}, &WorkflowTemplateList{}) +} diff --git a/apis/zz_register.go b/apis/zz_register.go index ac04315ee..22c5ba6f4 100755 --- a/apis/zz_register.go +++ b/apis/zz_register.go @@ -44,6 +44,7 @@ import ( v1beta1datacatalog "github.com/upbound/provider-gcp/apis/datacatalog/v1beta1" v1beta1dataflow "github.com/upbound/provider-gcp/apis/dataflow/v1beta1" v1beta1datafusion "github.com/upbound/provider-gcp/apis/datafusion/v1beta1" + v1beta1dataproc "github.com/upbound/provider-gcp/apis/dataproc/v1beta1" v1beta1datastore "github.com/upbound/provider-gcp/apis/datastore/v1beta1" v1beta1dialogflowcx "github.com/upbound/provider-gcp/apis/dialogflowcx/v1beta1" v1beta1dns "github.com/upbound/provider-gcp/apis/dns/v1beta1" @@ -98,6 +99,7 @@ func init() { v1beta1datacatalog.SchemeBuilder.AddToScheme, v1beta1dataflow.SchemeBuilder.AddToScheme, v1beta1datafusion.SchemeBuilder.AddToScheme, + v1beta1dataproc.SchemeBuilder.AddToScheme, v1beta1datastore.SchemeBuilder.AddToScheme, v1beta1dialogflowcx.SchemeBuilder.AddToScheme, v1beta1dns.SchemeBuilder.AddToScheme, diff --git a/config/externalname.go b/config/externalname.go index 22a2e4a19..c8b8d6af3 100644 --- a/config/externalname.go +++ b/config/externalname.go @@ -706,6 +706,17 @@ var externalNameConfigs = map[string]config.ExternalName{ "google_binary_authorization_attestor": config.TemplatedStringAsIdentifier("name", "projects/{{ .setup.configuration.project }}/attestors/{{ .external_name }}"), // projects/{{project}} "google_binary_authorization_policy": config.TemplatedStringAsIdentifier("", "projects/{{ .setup.configuration.project }}"), + + // dataproc + // + // Imported by using the following format: projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}} + "google_dataproc_autoscaling_policy": config.TemplatedStringAsIdentifier("policy_id", "projects/{{ .setup.configuration.project }}/locations/{{ .parameters.location }}/autoscalingPolicies/{{ .external_name }}"), + // No Import + "google_dataproc_cluster": config.IdentifierFromProvider, + // No Import + "google_dataproc_job": config.IdentifierFromProvider, + // Imported by using the following format: projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}} + "google_dataproc_workflow_template": config.TemplatedStringAsIdentifier("name", "projects/{{ .setup.configuration.project }}/locations/{{ .parameters.location }}/workflowTemplates/{{ .external_name }}"), } // TemplatedStringAsIdentifierWithNoName uses TemplatedStringAsIdentifier but diff --git a/config/externalnamenottested.go b/config/externalnamenottested.go index d1a7d5259..d068e4296 100644 --- a/config/externalnamenottested.go +++ b/config/externalnamenottested.go @@ -160,26 +160,18 @@ var ExternalNameNotTestedConfigs = map[string]config.ExternalName{ // dataproc // - // Imported by using the following format: projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}} - "google_dataproc_autoscaling_policy": config.TemplatedStringAsIdentifier("policy_id", "projects/{{ .setup.configuration.project }}/locations/{{ .parameters.location }}/autoscalingPolicies/{{ .external_name }}"), - // No Import - "google_dataproc_cluster": config.IdentifierFromProvider, // Imported by using the following format: projects/{project}/regions/{region}/clusters/{cluster} roles/editor "google_dataproc_cluster_iam_binding": 
config.TemplatedStringAsIdentifier("cluster", "projects/{ .setup.configuration.project }/regions/{{ .parameters.region }}/clusters/{{ .external_name }} {{ .parameters.role }}"), // Imported by using the following format: projects/{project}/regions/{region}/clusters/{cluster} roles/editor user:jane@example.com "google_dataproc_cluster_iam_member": config.TemplatedStringAsIdentifier("cluster", "projects/{ .setup.configuration.project }/regions/{{ .parameters.region }}/clusters/{{ .external_name }} {{ .parameters.role }} {{ .parameters.member }}"), // Imported by using the following format: projects/{project}/regions/{region}/clusters/{cluster} "google_dataproc_cluster_iam_policy": config.TemplatedStringAsIdentifier("cluster", "projects/{ .setup.configuration.project }/regions/{{ .parameters.region }}/clusters/{{ .external_name }}"), - // No Import - "google_dataproc_job": config.IdentifierFromProvider, // Imported by using the following format: projects/{project}/regions/{region}/jobs/{job_id} roles/editor "google_dataproc_job_iam_binding": config.TemplatedStringAsIdentifier("job_id", "projects/{ .setup.configuration.project }/regions/{{ .parameters.region }}/jobs/{{ .external_name }} {{ .parameters.role }}"), // Imported by using the following format: projects/{project}/regions/{region}/jobs/{job_id} roles/editor user:jane@example.com "google_dataproc_job_iam_member": config.TemplatedStringAsIdentifier("job_id", "projects/{ .setup.configuration.project }/regions/{{ .parameters.region }}/jobs/{{ .external_name }} {{ .parameters.role }} {{ .parameters.member }}"), // Imported by using the following format: projects/{project}/regions/{region}/jobs/{job_id} "google_dataproc_job_iam_policy": config.TemplatedStringAsIdentifier("job_id", "projects/{ .setup.configuration.project }/regions/{{ .parameters.region }}/jobs/{{ .external_name }}"), - // Imported by using the following format: projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}} - "google_dataproc_workflow_template": config.TemplatedStringAsIdentifier("name", "projects/{{ .setup.configuration.project }}/locations/{{ .parameters.location }}/workflowTemplates/{{ .external_name }}"), // deploymentmanager // diff --git a/examples-generated/dataproc/autoscalingpolicy.yaml b/examples-generated/dataproc/autoscalingpolicy.yaml new file mode 100644 index 000000000..249c996ed --- /dev/null +++ b/examples-generated/dataproc/autoscalingpolicy.yaml @@ -0,0 +1,39 @@ +apiVersion: dataproc.gcp.upbound.io/v1beta1 +kind: AutoscalingPolicy +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1beta1/autoscalingpolicy + labels: + testing.upbound.io/example-name: asp + name: asp +spec: + forProvider: + basicAlgorithm: + - yarnConfig: + - gracefulDecommissionTimeout: 30s + scaleDownFactor: 0.5 + scaleUpFactor: 0.5 + location: us-central1 + workerConfig: + - maxInstances: 3 + +--- + +apiVersion: dataproc.gcp.upbound.io/v1beta1 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1beta1/autoscalingpolicy + labels: + testing.upbound.io/example-name: basic + name: basic +spec: + forProvider: + clusterConfig: + - autoscalingConfig: + - policyUri: ${google_dataproc_autoscaling_policy.asp.name} + name: dataproc-policy + region: us-central1 + +--- + diff --git a/examples-generated/dataproc/cluster.yaml b/examples-generated/dataproc/cluster.yaml new file mode 100644 index 000000000..a6faa259b --- /dev/null +++ b/examples-generated/dataproc/cluster.yaml @@ -0,0 +1,15 @@ +apiVersion: dataproc.gcp.upbound.io/v1beta1 
+kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1beta1/cluster + labels: + testing.upbound.io/example-name: simplecluster + name: simplecluster +spec: + forProvider: + name: simplecluster + region: us-central1 + +--- + diff --git a/examples-generated/dataproc/job.yaml b/examples-generated/dataproc/job.yaml new file mode 100644 index 000000000..64ea36157 --- /dev/null +++ b/examples-generated/dataproc/job.yaml @@ -0,0 +1,47 @@ +apiVersion: dataproc.gcp.upbound.io/v1beta1 +kind: Job +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1beta1/job + labels: + testing.upbound.io/example-name: spark + name: spark +spec: + forProvider: + forceDelete: true + placement: + - clusterNameSelector: + matchLabels: + testing.upbound.io/example-name: mycluster + regionSelector: + matchLabels: + testing.upbound.io/example-name: mycluster + sparkConfig: + - args: + - "1000" + jarFileUris: + - file:///usr/lib/spark/examples/jars/spark-examples.jar + loggingConfig: + - driverLogLevels: + root: INFO + mainClass: org.apache.spark.examples.SparkPi + properties: + spark.logConf: "true" + +--- + +apiVersion: dataproc.gcp.upbound.io/v1beta1 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1beta1/job + labels: + testing.upbound.io/example-name: mycluster + name: mycluster +spec: + forProvider: + name: dproc-cluster-unique-name + region: us-central1 + +--- + diff --git a/examples-generated/dataproc/workflowtemplate.yaml b/examples-generated/dataproc/workflowtemplate.yaml new file mode 100644 index 000000000..2c055bf32 --- /dev/null +++ b/examples-generated/dataproc/workflowtemplate.yaml @@ -0,0 +1,48 @@ +apiVersion: dataproc.gcp.upbound.io/v1beta1 +kind: WorkflowTemplate +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1beta1/workflowtemplate + labels: + testing.upbound.io/example-name: template + name: template +spec: + forProvider: + jobs: + - sparkJob: + - mainClass: SomeClass + stepId: someJob + - prerequisiteStepIds: + - someJob + prestoJob: + - queryFileUri: someuri + stepId: otherJob + location: us-central1 + placement: + - managedCluster: + - clusterName: my-cluster + config: + - gceClusterConfig: + - tags: + - foo + - bar + zone: us-central1-a + masterConfig: + - diskConfig: + - bootDiskSizeGb: 15 + bootDiskType: pd-ssd + machineType: n1-standard-1 + numInstances: 1 + secondaryWorkerConfig: + - numInstances: 2 + softwareConfig: + - imageVersion: 2.0.35-debian10 + workerConfig: + - diskConfig: + - bootDiskSizeGb: 10 + numLocalSsds: 2 + machineType: n1-standard-2 + numInstances: 3 + +--- + diff --git a/examples/dataproc/autoscalingpolicy.yaml b/examples/dataproc/autoscalingpolicy.yaml new file mode 100644 index 000000000..5f3992e2f --- /dev/null +++ b/examples/dataproc/autoscalingpolicy.yaml @@ -0,0 +1,18 @@ +apiVersion: dataproc.gcp.upbound.io/v1beta1 +kind: AutoscalingPolicy +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1beta1/autoscalingpolicy + labels: + testing.upbound.io/example-name: asp + name: asp +spec: + forProvider: + basicAlgorithm: + - yarnConfig: + - gracefulDecommissionTimeout: 30s + scaleDownFactor: 0.5 + scaleUpFactor: 0.5 + location: us-central1 + workerConfig: + - maxInstances: 3 \ No newline at end of file diff --git a/examples/dataproc/cluster.yaml b/examples/dataproc/cluster.yaml new file mode 100644 index 000000000..5bfef73b3 --- /dev/null +++ b/examples/dataproc/cluster.yaml @@ -0,0 +1,16 @@ +apiVersion: dataproc.gcp.upbound.io/v1beta1 +kind: Cluster +metadata: + 
annotations: + meta.upbound.io/example-id: dataproc/v1beta1/cluster + labels: + testing.upbound.io/example-name: simplecluster + name: simplecluster +spec: + forProvider: + name: simplecluster + region: us-central1 + clusterConfig: + - preemptibleWorkerConfig: + - diskConfig: + - bootDiskType: pd-standard diff --git a/examples/dataproc/job.yaml b/examples/dataproc/job.yaml new file mode 100644 index 000000000..1ab17eb76 --- /dev/null +++ b/examples/dataproc/job.yaml @@ -0,0 +1,48 @@ +apiVersion: dataproc.gcp.upbound.io/v1beta1 +kind: Job +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1beta1/job + labels: + testing.upbound.io/example-name: spark + name: spark +spec: + forProvider: + forceDelete: true + placement: + - clusterNameSelector: + matchLabels: + testing.upbound.io/example-name: mycluster + regionSelector: + matchLabels: + testing.upbound.io/example-name: mycluster + sparkConfig: + - args: + - "1000" + jarFileUris: + - file:///usr/lib/spark/examples/jars/spark-examples.jar + loggingConfig: + - driverLogLevels: + root: INFO + mainClass: org.apache.spark.examples.SparkPi + properties: + spark.logConf: "true" + +--- + +apiVersion: dataproc.gcp.upbound.io/v1beta1 +kind: Cluster +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1beta1/job + labels: + testing.upbound.io/example-name: mycluster + name: mycluster +spec: + forProvider: + name: dproc-cluster-unique-name + region: us-central1 + clusterConfig: + - preemptibleWorkerConfig: + - diskConfig: + - bootDiskType: pd-standard \ No newline at end of file diff --git a/examples/dataproc/workflowtemplate.yaml b/examples/dataproc/workflowtemplate.yaml new file mode 100644 index 000000000..00e87fcf8 --- /dev/null +++ b/examples/dataproc/workflowtemplate.yaml @@ -0,0 +1,45 @@ +apiVersion: dataproc.gcp.upbound.io/v1beta1 +kind: WorkflowTemplate +metadata: + annotations: + meta.upbound.io/example-id: dataproc/v1beta1/workflowtemplate + labels: + testing.upbound.io/example-name: template + name: template +spec: + forProvider: + jobs: + - sparkJob: + - mainClass: SomeClass + stepId: someJob + - prerequisiteStepIds: + - someJob + prestoJob: + - queryFileUri: someuri + stepId: otherJob + location: us-central1 + placement: + - managedCluster: + - clusterName: my-cluster + config: + - gceClusterConfig: + - tags: + - foo + - bar + zone: us-central1-a + masterConfig: + - diskConfig: + - bootDiskSizeGb: 15 + bootDiskType: pd-ssd + machineType: n1-standard-1 + numInstances: 1 + secondaryWorkerConfig: + - numInstances: 2 + softwareConfig: + - imageVersion: 2.0.35-debian10 + workerConfig: + - diskConfig: + - bootDiskSizeGb: 10 + numLocalSsds: 2 + machineType: n1-standard-2 + numInstances: 3 \ No newline at end of file diff --git a/internal/controller/dataproc/autoscalingpolicy/zz_controller.go b/internal/controller/dataproc/autoscalingpolicy/zz_controller.go new file mode 100755 index 000000000..ae8eb089d --- /dev/null +++ b/internal/controller/dataproc/autoscalingpolicy/zz_controller.go @@ -0,0 +1,64 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. + +package autoscalingpolicy + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1beta1 "github.com/upbound/provider-gcp/apis/dataproc/v1beta1" +) + +// Setup adds a controller that reconciles AutoscalingPolicy managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1beta1.AutoscalingPolicy_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + r := managed.NewReconciler(mgr, + xpresource.ManagedKind(v1beta1.AutoscalingPolicy_GroupVersionKind), + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["google_dataproc_autoscaling_policy"], + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.AutoscalingPolicy_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3*time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + ) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1beta1.AutoscalingPolicy{}). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/dataproc/cluster/zz_controller.go b/internal/controller/dataproc/cluster/zz_controller.go new file mode 100755 index 000000000..d30620d14 --- /dev/null +++ b/internal/controller/dataproc/cluster/zz_controller.go @@ -0,0 +1,63 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. 
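+// The Setup function below follows the same shape as the other generated
+// dataproc controllers in this patch: it builds a Terraform-backed
+// managed.Reconciler for the Cluster kind and binds it to the
+// "google_dataproc_cluster" Terraform resource. A minimal usage sketch (not
+// the definitive wiring), assuming the manager `mgr` and the upjet controller
+// options value `o` that internal/controller/zz_setup.go passes to every
+// Setup function:
+//
+//	import clusterdataproc "github.com/upbound/provider-gcp/internal/controller/dataproc/cluster"
+//
+//	if err := clusterdataproc.Setup(mgr, o); err != nil {
+//		return err
+//	}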
+ +package cluster + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1beta1 "github.com/upbound/provider-gcp/apis/dataproc/v1beta1" +) + +// Setup adds a controller that reconciles Cluster managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1beta1.Cluster_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + r := managed.NewReconciler(mgr, + xpresource.ManagedKind(v1beta1.Cluster_GroupVersionKind), + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["google_dataproc_cluster"], + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.Cluster_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3*time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + ) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1beta1.Cluster{}). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/dataproc/job/zz_controller.go b/internal/controller/dataproc/job/zz_controller.go new file mode 100755 index 000000000..947ecf48e --- /dev/null +++ b/internal/controller/dataproc/job/zz_controller.go @@ -0,0 +1,63 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. 
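+// The Job example added under examples/dataproc/job.yaml does not hard-code
+// its placement: it uses clusterNameSelector and regionSelector with
+// matchLabels (testing.upbound.io/example-name: mycluster), so the placement
+// is resolved from the Cluster managed resource created alongside it,
+// presumably via the reference resolvers generated in
+// apis/dataproc/v1beta1/zz_generated.resolvers.go.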
+ +package job + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1beta1 "github.com/upbound/provider-gcp/apis/dataproc/v1beta1" +) + +// Setup adds a controller that reconciles Job managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1beta1.Job_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + r := managed.NewReconciler(mgr, + xpresource.ManagedKind(v1beta1.Job_GroupVersionKind), + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["google_dataproc_job"], + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.Job_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3*time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + ) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1beta1.Job{}). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/dataproc/workflowtemplate/zz_controller.go b/internal/controller/dataproc/workflowtemplate/zz_controller.go new file mode 100755 index 000000000..a4a6f02e7 --- /dev/null +++ b/internal/controller/dataproc/workflowtemplate/zz_controller.go @@ -0,0 +1,64 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by upjet. DO NOT EDIT. 
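+// Of the four dataproc controllers added here, the AutoscalingPolicy and
+// WorkflowTemplate Setup functions register managed.NewNameAsExternalName as
+// an initializer, so the Kubernetes object name doubles as the external
+// resource name, while the Cluster and Job controllers leave the initializer
+// chain empty (their example manifests set spec.forProvider.name explicitly).
+// The relevant line, as it appears in the Setup function below:
+//
+//	initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient()))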
+ +package workflowtemplate + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + tjcontroller "github.com/upbound/upjet/pkg/controller" + "github.com/upbound/upjet/pkg/terraform" + ctrl "sigs.k8s.io/controller-runtime" + + v1beta1 "github.com/upbound/provider-gcp/apis/dataproc/v1beta1" +) + +// Setup adds a controller that reconciles WorkflowTemplate managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1beta1.WorkflowTemplate_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK)) + } + r := managed.NewReconciler(mgr, + xpresource.ManagedKind(v1beta1.WorkflowTemplate_GroupVersionKind), + managed.WithExternalConnecter(tjcontroller.NewConnector(mgr.GetClient(), o.WorkspaceStore, o.SetupFn, o.Provider.Resources["google_dataproc_workflow_template"], + tjcontroller.WithCallbackProvider(tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.WorkflowTemplate_GroupVersionKind))), + )), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(terraform.NewWorkspaceFinalizer(o.WorkspaceStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3*time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + ) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + For(&v1beta1.WorkflowTemplate{}). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/zz_setup.go b/internal/controller/zz_setup.go index fc14c04a3..8531392ba 100755 --- a/internal/controller/zz_setup.go +++ b/internal/controller/zz_setup.go @@ -167,6 +167,10 @@ import ( tagtemplate "github.com/upbound/provider-gcp/internal/controller/datacatalog/tagtemplate" jobdataflow "github.com/upbound/provider-gcp/internal/controller/dataflow/job" instancedatafusion "github.com/upbound/provider-gcp/internal/controller/datafusion/instance" + autoscalingpolicy "github.com/upbound/provider-gcp/internal/controller/dataproc/autoscalingpolicy" + clusterdataproc "github.com/upbound/provider-gcp/internal/controller/dataproc/cluster" + jobdataproc "github.com/upbound/provider-gcp/internal/controller/dataproc/job" + workflowtemplate "github.com/upbound/provider-gcp/internal/controller/dataproc/workflowtemplate" index "github.com/upbound/provider-gcp/internal/controller/datastore/index" agent "github.com/upbound/provider-gcp/internal/controller/dialogflowcx/agent" entitytype "github.com/upbound/provider-gcp/internal/controller/dialogflowcx/entitytype" @@ -416,6 +420,10 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { tagtemplate.Setup, jobdataflow.Setup, instancedatafusion.Setup, + autoscalingpolicy.Setup, + clusterdataproc.Setup, + jobdataproc.Setup, + workflowtemplate.Setup, index.Setup, agent.Setup, entitytype.Setup, diff --git a/package/crds/dataproc.gcp.upbound.io_autoscalingpolicies.yaml b/package/crds/dataproc.gcp.upbound.io_autoscalingpolicies.yaml new file mode 100644 index 000000000..4d48073df --- /dev/null +++ b/package/crds/dataproc.gcp.upbound.io_autoscalingpolicies.yaml @@ -0,0 +1,452 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: autoscalingpolicies.dataproc.gcp.upbound.io +spec: + group: dataproc.gcp.upbound.io + names: + categories: + - crossplane + - managed + - gcp + kind: AutoscalingPolicy + listKind: AutoscalingPolicyList + plural: autoscalingpolicies + singular: autoscalingpolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: AutoscalingPolicy is the Schema for the AutoscalingPolicys API. + Describes an autoscaling policy for Dataproc cluster autoscaler. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AutoscalingPolicySpec defines the desired state of AutoscalingPolicy + properties: + deletionPolicy: + default: Delete + description: DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + basicAlgorithm: + description: Basic algorithm for autoscaling. Structure is documented + below. + items: + properties: + cooldownPeriod: + description: 'Duration between scaling events. A scaling + period starts after the update operation from the previous + event has completed. Bounds: [2m, 1d]. Default: 2m.' + type: string + yarnConfig: + description: YARN autoscaling configuration. Structure is + documented below. + items: + properties: + gracefulDecommissionTimeout: + description: 'Timeout for YARN graceful decommissioning + of Node Managers. Specifies the duration to wait + for jobs to complete before forcefully removing + workers (and potentially interrupting jobs). Only + applicable to downscaling operations. Bounds: [0s, + 1d].' + type: string + scaleDownFactor: + description: 'Fraction of average pending memory in + the last cooldown period for which to remove workers. + A scale-down factor of 1 will result in scaling + down so that there is no available memory remaining + after the update (more aggressive scaling). A scale-down + factor of 0 disables removing workers, which can + be beneficial for autoscaling a single job. Bounds: + [0.0, 1.0].' + type: number + scaleDownMinWorkerFraction: + description: 'Minimum scale-down threshold as a fraction + of total cluster size before scaling occurs. For + example, in a 20-worker cluster, a threshold of + 0.1 means the autoscaler must recommend at least + a 2 worker scale-down for the cluster to scale. + A threshold of 0 means the autoscaler will scale + down on any recommended change. Bounds: [0.0, 1.0]. + Default: 0.0.' + type: number + scaleUpFactor: + description: 'Fraction of average pending memory in + the last cooldown period for which to add workers. + A scale-up factor of 1.0 will result in scaling + up so that there is no pending memory remaining + after the update (more aggressive scaling). A scale-up + factor closer to 0 will result in a smaller magnitude + of scaling up (less aggressive scaling). Bounds: + [0.0, 1.0].' + type: number + scaleUpMinWorkerFraction: + description: 'Minimum scale-up threshold as a fraction + of total cluster size before scaling occurs. For + example, in a 20-worker cluster, a threshold of + 0.1 means the autoscaler must recommend at least + a 2-worker scale-up for the cluster to scale. A + threshold of 0 means the autoscaler will scale up + on any recommended change. Bounds: [0.0, 1.0]. Default: + 0.0.' + type: number + required: + - gracefulDecommissionTimeout + - scaleDownFactor + - scaleUpFactor + type: object + type: array + required: + - yarnConfig + type: object + type: array + location: + description: The location where the autoscaling policy should + reside. The default value is global. + type: string + project: + description: The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. 
+ type: string + secondaryWorkerConfig: + description: Describes how the autoscaler will operate for secondary + workers. Structure is documented below. + items: + properties: + maxInstances: + description: 'Maximum number of instances for this group. + Note that by default, clusters will not use secondary + workers. Required for secondary workers if the minimum + secondary instances is set. Bounds: [minInstances, ). + Defaults to 0.' + type: number + minInstances: + description: 'Minimum number of instances for this group. + Bounds: [0, maxInstances]. Defaults to 0.' + type: number + weight: + description: 'Weight for the instance group, which is used + to determine the fraction of total workers in the cluster + from this instance group. For example, if primary workers + have weight 2, and secondary workers have weight 1, the + cluster will have approximately 2 primary workers for + each secondary worker. The cluster may not reach the specified + balance if constrained by min/max bounds or other autoscaling + settings. For example, if maxInstances for secondary workers + is 0, then only primary workers will be added. The cluster + can also be out of balance when created. If weight is + not set on any instance group, the cluster will default + to equal weight for all groups: the cluster will attempt + to maintain an equal number of workers in each group within + the configured size bounds for each group. If weight is + set for one group only, the cluster will default to zero + weight on the unset group. For example if weight is set + only on primary workers, the cluster will use primary + workers only and no secondary workers.' + type: number + type: object + type: array + workerConfig: + description: Describes how the autoscaler will operate for primary + workers. Structure is documented below. + items: + properties: + maxInstances: + description: Maximum number of instances for this group. + type: number + minInstances: + description: 'Minimum number of instances for this group. + Bounds: [2, maxInstances]. Defaults to 2.' + type: number + weight: + description: 'Weight for the instance group, which is used + to determine the fraction of total workers in the cluster + from this instance group. For example, if primary workers + have weight 2, and secondary workers have weight 1, the + cluster will have approximately 2 primary workers for + each secondary worker. The cluster may not reach the specified + balance if constrained by min/max bounds or other autoscaling + settings. For example, if maxInstances for secondary workers + is 0, then only primary workers will be added. The cluster + can also be out of balance when created. If weight is + not set on any instance group, the cluster will default + to equal weight for all groups: the cluster will attempt + to maintain an equal number of workers in each group within + the configured size bounds for each group. If weight is + set for one group only, the cluster will default to zero + weight on the unset group. For example if weight is set + only on primary workers, the cluster will use primary + workers only and no secondary workers.' + type: number + required: + - maxInstances + type: object + type: array + type: object + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. 
+ type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: AutoscalingPolicyStatus defines the observed state of AutoscalingPolicy. + properties: + atProvider: + properties: + id: + description: an identifier for the resource with format projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}} + type: string + name: + description: The "resource name" of the autoscaling policy. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/package/crds/dataproc.gcp.upbound.io_clusters.yaml b/package/crds/dataproc.gcp.upbound.io_clusters.yaml new file mode 100644 index 000000000..8b4b4826f --- /dev/null +++ b/package/crds/dataproc.gcp.upbound.io_clusters.yaml @@ -0,0 +1,1249 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: clusters.dataproc.gcp.upbound.io +spec: + group: dataproc.gcp.upbound.io + names: + categories: + - crossplane + - managed + - gcp + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the Clusters API. Manages a Cloud Dataproc + cluster resource. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterSpec defines the desired state of Cluster + properties: + deletionPolicy: + default: Delete + description: DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + clusterConfig: + description: Allows you to configure various aspects of the cluster. + Structure defined below. + items: + properties: + autoscalingConfig: + description: The autoscaling policy config associated with + the cluster. Note that once set, if autoscaling_config + is the only field set in cluster_config, it can only be + removed by setting policy_uri = "", rather than removing + the whole block. Structure defined below. + items: + properties: + policyUri: + description: The autoscaling policy used by the cluster. + type: string + required: + - policyUri + type: object + type: array + encryptionConfig: + description: The Customer managed encryption keys settings + for the cluster. Structure defined below. + items: + properties: + kmsKeyName: + description: The Cloud KMS key name to use for PD + disk encryption for all instances in the cluster. 
+ type: string + required: + - kmsKeyName + type: object + type: array + endpointConfig: + description: The config settings for port access on the + cluster. Structure defined below. + items: + properties: + enableHttpPortAccess: + description: The flag to enable http access to specific + ports on the cluster from external sources (aka + Component Gateway). Defaults to false. + type: boolean + required: + - enableHttpPortAccess + type: object + type: array + gceClusterConfig: + description: Common config settings for resources of Google + Compute Engine cluster instances, applicable to all instances + in the cluster. Structure defined below. + items: + properties: + internalIpOnly: + description: 'By default, clusters are not restricted + to internal IP addresses, and will have ephemeral + external IP addresses assigned to each instance. + If set to true, all instances in the cluster will + only have internal IP addresses. Note: Private Google + Access (also known as privateIpGoogleAccess) must + be enabled on the subnetwork that the cluster will + be launched in.' + type: boolean + metadata: + additionalProperties: + type: string + description: A map of the Compute Engine metadata + entries to add to all instances (see Project and + instance metadata). + type: object + network: + description: The name or self_link of the Google Compute + Engine network to the cluster will be part of. Conflicts + with subnetwork. If neither is specified, this defaults + to the "default" network. + type: string + serviceAccount: + description: The service account to be used by the + Node VMs. If not specified, the "default" service + account is used. + type: string + serviceAccountRef: + description: Reference to a ServiceAccount in cloudplatform + to populate serviceAccount. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether + resolution of this reference is required. + The default is 'Required', which means the + reconcile will fail if the reference cannot + be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference + should be resolved. The default is 'IfNotPresent', + which will attempt to resolve the reference + only when the corresponding field is not + present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + serviceAccountScopes: + description: The set of Google API scopes to be made + available on all of the node VMs under the service_account + specified. Both OAuth2 URLs and gcloud short names + are supported. To allow full access to all Cloud + APIs, use the cloud-platform scope. See a complete + list of scopes here. + items: + type: string + type: array + serviceAccountSelector: + description: Selector for a ServiceAccount in cloudplatform + to populate serviceAccount. + properties: + matchControllerRef: + description: MatchControllerRef ensures an object + with the same controller reference as the selecting + object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with + matching labels is selected. + type: object + policy: + description: Policies for selection. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether + resolution of this reference is required. + The default is 'Required', which means the + reconcile will fail if the reference cannot + be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference + should be resolved. The default is 'IfNotPresent', + which will attempt to resolve the reference + only when the corresponding field is not + present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + shieldedInstanceConfig: + description: Shielded Instance Config for clusters + using Compute Engine Shielded VMs. + items: + properties: + enableIntegrityMonitoring: + description: Defines whether instances have + integrity monitoring enabled. + type: boolean + enableSecureBoot: + description: Defines whether instances have + Secure Boot enabled. + type: boolean + enableVtpm: + description: Defines whether instances have + the vTPM enabled. + type: boolean + type: object + type: array + subnetwork: + description: The name or self_link of the Google Compute + Engine subnetwork the cluster will be part of. Conflicts + with network. + type: string + tags: + description: The list of instance tags applied to + instances in the cluster. Tags are used to identify + valid sources or targets for network firewalls. + items: + type: string + type: array + zone: + description: 'The GCP zone where your data is stored + and used (i.e. where the master and the worker nodes + will be created in). If region is set to ''global'' + (default) then zone is mandatory, otherwise GCP + is able to make use of Auto Zone Placement to determine + this automatically for you. Note: This setting additionally + determines and restricts which computing resources + are available for use with other configs such as + cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type.' + type: string + type: object + type: array + initializationAction: + description: Commands to execute on each node after config + is completed. You can specify multiple versions of these. + Structure defined below. + items: + properties: + script: + description: The script to be executed during initialization + of the cluster. The script must be a GCS file with + a gs:// prefix. + type: string + timeoutSec: + description: The maximum duration (in seconds) which + script is allowed to take to execute its action. + GCP will default to a predetermined computed value + if not set (currently 300). + type: number + required: + - script + type: object + type: array + lifecycleConfig: + description: The settings for auto deletion cluster schedule. + Structure defined below. + items: + properties: + autoDeleteTime: + description: 'The time when cluster will be auto-deleted. + A timestamp in RFC3339 UTC "Zulu" format, accurate + to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".' + type: string + idleDeleteTtl: + description: 'The duration to keep the cluster alive + while idling (no jobs running). After this TTL, + the cluster will be deleted. Valid range: [10m, + 14d].' + type: string + type: object + type: array + masterConfig: + description: The Google Compute Engine config settings for + the master instances in a cluster. Structure defined below. 
+ items: + properties: + accelerators: + description: The Compute Engine accelerator (GPU) + configuration for these instances. Can be specified + multiple times. + items: + properties: + acceleratorCount: + description: The number of the accelerator cards + of this type exposed to this instance. Often + restricted to one of 1, 2, 4, or 8. + type: number + acceleratorType: + description: The short name of the accelerator + type to expose to this instance. For example, + nvidia-tesla-k80. + type: string + required: + - acceleratorCount + - acceleratorType + type: object + type: array + diskConfig: + description: Disk Config + items: + properties: + bootDiskSizeGb: + description: 'Size of the primary disk attached + to each node, specified in GB. The primary + disk contains the boot volume and system libraries, + and the smallest allowed disk size is 10GB. + GCP will default to a predetermined computed + value if not set (currently 500GB). Note: + If SSDs are not attached, it also contains + the HDFS data blocks and Hadoop working directories.' + type: number + bootDiskType: + description: The disk type of the primary disk + attached to each node. One of "pd-ssd" or + "pd-standard". Defaults to "pd-standard". + type: string + numLocalSsds: + description: The amount of local SSD disks that + will be attached to each master cluster node. + Defaults to 0. + type: number + type: object + type: array + imageUri: + description: The URI for the image to use for this + worker. See the guide for more information. + type: string + machineType: + description: The name of a Google Compute Engine machine + type to create for the master. If not specified, + GCP will default to a predetermined computed value + (currently n1-standard-4). + type: string + minCpuPlatform: + description: The name of a minimum generation of CPU + family for the master. If not specified, GCP will + default to a predetermined computed value for each + zone. See the guide for details about which CPU + families are available (and defaulted) for each + zone. + type: string + numInstances: + description: Specifies the number of master nodes + to create. If not specified, GCP will default to + a predetermined computed value (currently 1). + type: number + type: object + type: array + metastoreConfig: + description: The config setting for metastore service with + the cluster. Structure defined below. + items: + properties: + dataprocMetastoreService: + description: Resource name of an existing Dataproc + Metastore service. + type: string + required: + - dataprocMetastoreService + type: object + type: array + preemptibleWorkerConfig: + description: The Google Compute Engine config settings for + the additional instances in a cluster. Structure defined + below. + items: + properties: + diskConfig: + description: Disk Config + items: + properties: + bootDiskSizeGb: + description: 'Size of the primary disk attached + to each node, specified in GB. The primary + disk contains the boot volume and system libraries, + and the smallest allowed disk size is 10GB. + GCP will default to a predetermined computed + value if not set (currently 500GB). Note: + If SSDs are not attached, it also contains + the HDFS data blocks and Hadoop working directories.' + type: number + bootDiskType: + description: The disk type of the primary disk + attached to each node. One of "pd-ssd" or + "pd-standard". Defaults to "pd-standard". + type: string + numLocalSsds: + description: The amount of local SSD disks that + will be attached to each master cluster node. 
+ Defaults to 0. + type: number + type: object + type: array + numInstances: + description: Specifies the number of preemptible nodes + to create. Defaults to 0. + type: number + preemptibility: + description: 'Specifies the preemptibility of the + secondary workers. The default value is PREEMPTIBLE + Accepted values are:' + type: string + type: object + type: array + securityConfig: + description: Security related configuration. Structure defined + below. + items: + properties: + kerberosConfig: + description: Kerberos Configuration + items: + properties: + crossRealmTrustAdminServer: + description: The admin server (IP or hostname) + for the remote trusted realm in a cross realm + trust relationship. + type: string + crossRealmTrustKdc: + description: The KDC (IP or hostname) for the + remote trusted realm in a cross realm trust + relationship. + type: string + crossRealmTrustRealm: + description: The remote realm the Dataproc on-cluster + KDC will trust, should the user enable cross + realm trust. + type: string + crossRealmTrustSharedPasswordUri: + description: The Cloud Storage URI of a KMS + encrypted file containing the shared password + between the on-cluster Kerberos realm and + the remote trusted realm, in a cross realm + trust relationship. + type: string + enableKerberos: + description: Flag to indicate whether to Kerberize + the cluster. + type: boolean + kdcDbKeyUri: + description: The Cloud Storage URI of a KMS + encrypted file containing the master key of + the KDC database. + type: string + keyPasswordUri: + description: The Cloud Storage URI of a KMS + encrypted file containing the password to + the user provided key. For the self-signed + certificate, this password is generated by + Dataproc. + type: string + keystorePasswordUri: + description: The Cloud Storage URI of a KMS + encrypted file containing the password to + the user provided keystore. For the self-signed + certificated, the password is generated by + Dataproc. + type: string + keystoreUri: + description: The Cloud Storage URI of the keystore + file used for SSL encryption. If not provided, + Dataproc will provide a self-signed certificate. + type: string + kmsKeyUri: + description: The URI of the KMS key used to + encrypt various sensitive files. + type: string + realm: + description: The name of the on-cluster Kerberos + realm. If not specified, the uppercased domain + of hostnames will be the realm. + type: string + rootPrincipalPasswordUri: + description: The Cloud Storage URI of a KMS + encrypted file containing the root principal + password. + type: string + tgtLifetimeHours: + description: The lifetime of the ticket granting + ticket, in hours. + type: number + truststorePasswordUri: + description: The Cloud Storage URI of a KMS + encrypted file containing the password to + the user provided truststore. For the self-signed + certificate, this password is generated by + Dataproc. + type: string + truststoreUri: + description: The Cloud Storage URI of the truststore + file used for SSL encryption. If not provided, + Dataproc will provide a self-signed certificate. + type: string + required: + - kmsKeyUri + - rootPrincipalPasswordUri + type: object + type: array + required: + - kerberosConfig + type: object + type: array + softwareConfig: + description: The config settings for software inside the + cluster. Structure defined below. 
+ items: + properties: + imageVersion: + description: The Cloud Dataproc image version to use + for the cluster - this controls the sets of software + versions installed onto the nodes when you create + clusters. If not specified, defaults to the latest + version. For a list of valid versions see Cloud + Dataproc versions + type: string + optionalComponents: + description: 'The set of optional components to activate + on the cluster. Accepted values are:' + items: + type: string + type: array + overrideProperties: + additionalProperties: + type: string + description: A list of override and additional properties + (key/value pairs) used to modify various aspects + of the common configuration files used when creating + a cluster. For a list of valid properties please + see Cluster properties + type: object + type: object + type: array + stagingBucket: + description: 'The Cloud Storage staging bucket used to stage + files, such as Hadoop jars, between client machines and + the cluster. Note: If you don''t explicitly specify a + staging_bucket then GCP will auto create / assign one + for you. However, you are not guaranteed an auto generated + bucket which is solely dedicated to your cluster; it may + be shared with other clusters in the same region/zone + also choosing to use the auto generation option.' + type: string + tempBucket: + description: 'The Cloud Storage temp bucket used to store + ephemeral cluster and jobs data, such as Spark and MapReduce + history files. Note: If you don''t explicitly specify + a temp_bucket then GCP will auto create / assign one for + you.' + type: string + workerConfig: + description: The Google Compute Engine config settings for + the worker instances in a cluster. Structure defined below. + items: + properties: + accelerators: + description: The Compute Engine accelerator configuration + for these instances. Can be specified multiple times. + items: + properties: + acceleratorCount: + description: The number of the accelerator cards + of this type exposed to this instance. Often + restricted to one of 1, 2, 4, or 8. + type: number + acceleratorType: + description: The short name of the accelerator + type to expose to this instance. For example, + nvidia-tesla-k80. + type: string + required: + - acceleratorCount + - acceleratorType + type: object + type: array + diskConfig: + description: Disk Config + items: + properties: + bootDiskSizeGb: + description: 'Size of the primary disk attached + to each node, specified in GB. The primary + disk contains the boot volume and system libraries, + and the smallest allowed disk size is 10GB. + GCP will default to a predetermined computed + value if not set (currently 500GB). Note: + If SSDs are not attached, it also contains + the HDFS data blocks and Hadoop working directories.' + type: number + bootDiskType: + description: The disk type of the primary disk + attached to each node. One of "pd-ssd" or + "pd-standard". Defaults to "pd-standard". + type: string + numLocalSsds: + description: The amount of local SSD disks that + will be attached to each master cluster node. + Defaults to 0. + type: number + type: object + type: array + imageUri: + description: The URI for the image to use for this + worker. See the guide for more information. + type: string + machineType: + description: The name of a Google Compute Engine machine + type to create for the worker nodes. If not specified, + GCP will default to a predetermined computed value + (currently n1-standard-4). 
+ type: string + minCpuPlatform: + description: The name of a minimum generation of CPU + family for the master. If not specified, GCP will + default to a predetermined computed value for each + zone. See the guide for details about which CPU + families are available (and defaulted) for each + zone. + type: string + numInstances: + description: Specifies the number of worker nodes + to create. If not specified, GCP will default to + a predetermined computed value (currently 2). There + is currently a beta feature which allows you to + run a Single Node Cluster. In order to take advantage + of this you need to set "dataproc:dataproc.allow.zero.workers" + = "true" in cluster_config.software_config.properties + type: number + type: object + type: array + type: object + type: array + gracefulDecommissionTimeout: + description: Does not affect auto scaling decomissioning from + an autoscaling policy. Graceful decommissioning allows removing + nodes from the cluster without interrupting jobs in progress. + Timeout specifies how long to wait for jobs in progress to finish + before forcefully removing nodes (and potentially interrupting + jobs). Default timeout is 0 (for forceful decommission), and + the maximum allowed timeout is 1 day. (see JSON representation + of Duration). Only supported on Dataproc image versions 1.2 + and higher. For more context see the docs + type: string + labels: + additionalProperties: + type: string + description: The list of labels (key/value pairs) to be applied + to instances in the cluster. GCP generates some itself including + goog-dataproc-cluster-name which is the name of the cluster. + type: object + name: + description: The name of the cluster, unique within the project + and zone. + type: string + project: + description: The ID of the project in which the cluster will exist. + If it is not provided, the provider project is used. + type: string + region: + description: The region in which the cluster and associated nodes + will be created in. Defaults to global. + type: string + virtualClusterConfig: + description: Allows you to configure a virtual Dataproc on GKE + cluster. Structure defined below. + items: + properties: + auxiliaryServicesConfig: + description: Configuration of auxiliary services used by + this cluster. Structure defined below. + items: + properties: + metastoreConfig: + description: The config setting for metastore service + with the cluster. Structure defined below. + items: + properties: + dataprocMetastoreService: + description: Resource name of an existing Dataproc + Metastore service. + type: string + type: object + type: array + sparkHistoryServerConfig: + description: The Spark History Server configuration + for the workload. + items: + properties: + dataprocCluster: + description: Resource name of an existing Dataproc + Cluster to act as a Spark History Server for + the workload. + type: string + type: object + type: array + type: object + type: array + kubernetesClusterConfig: + description: The configuration for running the Dataproc + cluster on Kubernetes. Structure defined below. + items: + properties: + gkeClusterConfig: + description: The configuration for running the Dataproc + cluster on GKE. + items: + properties: + gkeClusterTarget: + description: A target GKE cluster to deploy + to. It must be in the same project and region + as the Dataproc cluster (the GKE cluster can + be zonal or regional) + type: string + nodePoolTarget: + description: GKE node pools where workloads + will be scheduled. 
At least one node pool + must be assigned the DEFAULT GkeNodePoolTarget.Role. + If a GkeNodePoolTarget is not specified, Dataproc + constructs a DEFAULT GkeNodePoolTarget. Each + role can be given to only one GkeNodePoolTarget. + All node pools must have the same location + settings. + items: + properties: + nodePool: + description: The target GKE node pool. + type: string + nodePoolConfig: + description: (Input only) The configuration + for the GKE node pool. If specified, + Dataproc attempts to create a node pool + with the specified shape. If one with + the same name already exists, it is + verified against all specified fields. + If a field differs, the virtual cluster + creation will fail. + items: + properties: + autoscaling: + description: The autoscaler configuration + for this node pool. The autoscaler + is enabled only when a valid configuration + is present. + items: + properties: + maxNodeCount: + description: The maximum number + of nodes in the node pool. + Must be >= minNodeCount, + and must be > 0. + type: number + minNodeCount: + description: The minimum number + of nodes in the node pool. + Must be >= 0 and <= maxNodeCount. + type: number + type: object + type: array + config: + description: The node pool configuration. + items: + properties: + localSsdCount: + description: The number of + local SSD disks to attach + to the node, which is limited + by the maximum number of + disks allowable per zone. + type: number + machineType: + description: The name of a + Compute Engine machine type. + type: string + minCpuPlatform: + description: Minimum CPU platform + to be used by this instance. + The instance may be scheduled + on the specified or a newer + CPU platform. Specify the + friendly names of CPU platforms, + such as "Intel Haswell" + or "Intel Sandy Bridge". + type: string + preemptible: + description: Whether the nodes + are created as preemptible + VM instances. Preemptible + nodes cannot be used in + a node pool with the CONTROLLER + role or in the DEFAULT node + pool if the CONTROLLER role + is not assigned (the DEFAULT + node pool will assume the + CONTROLLER role). + type: boolean + spot: + description: Spot flag for + enabling Spot VM, which + is a rebrand of the existing + preemptible flag. + type: boolean + type: object + type: array + locations: + description: The list of Compute + Engine zones where node pool nodes + associated with a Dataproc on + GKE virtual cluster will be located. + items: + type: string + type: array + required: + - locations + type: object + type: array + roles: + description: The roles associated with + the GKE node pool. One of "DEFAULT", + "CONTROLLER", "SPARK_DRIVER" or "SPARK_EXECUTOR". + items: + type: string + type: array + required: + - nodePool + - roles + type: object + type: array + type: object + type: array + kubernetesNamespace: + description: A namespace within the Kubernetes cluster + to deploy into. If this namespace does not exist, + it is created. If it exists, Dataproc verifies + that another Dataproc VirtualCluster is not installed + into it. If not specified, the name of the Dataproc + Cluster is used. + type: string + kubernetesSoftwareConfig: + description: The software configuration for this Dataproc + cluster running on Kubernetes. + items: + properties: + componentVersion: + additionalProperties: + type: string + description: The components that should be installed + in this Dataproc cluster. The key must be + a string from the KubernetesComponent enumeration. 
+ The value is the version of the software to + be installed. At least one entry must be specified. + type: object + properties: + additionalProperties: + type: string + description: The properties to set on daemon + config files. Property keys are specified + in prefix:property format, for example spark:spark.kubernetes.container.image. + type: object + required: + - componentVersion + type: object + type: array + required: + - gkeClusterConfig + - kubernetesSoftwareConfig + type: object + type: array + stagingBucket: + description: 'The Cloud Storage staging bucket used to stage + files, such as Hadoop jars, between client machines and + the cluster. Note: If you don''t explicitly specify a + staging_bucket then GCP will auto create / assign one + for you. However, you are not guaranteed an auto generated + bucket which is solely dedicated to your cluster; it may + be shared with other clusters in the same region/zone + also choosing to use the auto generation option.' + type: string + type: object + type: array + required: + - name + type: object + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. 
Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: ClusterStatus defines the observed state of Cluster. + properties: + atProvider: + properties: + clusterConfig: + description: Allows you to configure various aspects of the cluster. + Structure defined below. + items: + properties: + bucket: + description: The name of the cloud storage bucket ultimately + used to house the staging data for the cluster. If staging_bucket + is specified, it will contain this value, otherwise it + will be the auto generated name. + type: string + endpointConfig: + description: The config settings for port access on the + cluster. Structure defined below. 
+ items: + properties: + httpPorts: + additionalProperties: + type: string + description: The map of port descriptions to URLs. + Will only be populated if enable_http_port_access + is true. + type: object + type: object + type: array + lifecycleConfig: + description: The settings for auto deletion cluster schedule. + Structure defined below. + items: + properties: + idleStartTime: + description: Time when the cluster became idle (most + recent job finished) and became eligible for deletion + due to idleness. + type: string + type: object + type: array + masterConfig: + description: The Google Compute Engine config settings for + the master instances in a cluster. Structure defined below. + items: + properties: + instanceNames: + description: List of worker instance names which have + been assigned to the cluster. + items: + type: string + type: array + type: object + type: array + preemptibleWorkerConfig: + description: The Google Compute Engine config settings for + the additional instances in a cluster. Structure defined + below. + items: + properties: + instanceNames: + description: List of worker instance names which have + been assigned to the cluster. + items: + type: string + type: array + type: object + type: array + softwareConfig: + description: The config settings for software inside the + cluster. Structure defined below. + items: + properties: + properties: + additionalProperties: + type: string + description: The properties to set on daemon config + files. Property keys are specified in prefix:property + format, for example spark:spark.kubernetes.container.image. + type: object + type: object + type: array + workerConfig: + description: The Google Compute Engine config settings for + the worker instances in a cluster. Structure defined below. + items: + properties: + instanceNames: + description: List of worker instance names which have + been assigned to the cluster. + items: + type: string + type: array + type: object + type: array + type: object + type: array + id: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/package/crds/dataproc.gcp.upbound.io_jobs.yaml b/package/crds/dataproc.gcp.upbound.io_jobs.yaml new file mode 100644 index 000000000..500d3b849 --- /dev/null +++ b/package/crds/dataproc.gcp.upbound.io_jobs.yaml @@ -0,0 +1,936 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: jobs.dataproc.gcp.upbound.io +spec: + group: dataproc.gcp.upbound.io + names: + categories: + - crossplane + - managed + - gcp + kind: Job + listKind: JobList + plural: jobs + singular: job + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Job is the Schema for the Jobs API. Manages a job resource within + a Dataproc cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: JobSpec defines the desired state of Job + properties: + deletionPolicy: + default: Delete + description: DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + forceDelete: + description: By default, you can only delete inactive jobs within + Dataproc. Setting this to true, and calling destroy, will ensure + that the job is first cancelled before issuing the delete. + type: boolean + hadoopConfig: + items: + properties: + archiveUris: + description: HCFS URIs of archives to be extracted in the + working directory of .jar, .tar, .tar.gz, .tgz, and .zip. + items: + type: string + type: array + args: + description: The arguments to pass to the driver. Do not + include arguments, such as -libjars or -Dfoo=bar, that + can be set as job properties, since a collision may occur + that causes an incorrect job submission. + items: + type: string + type: array + fileUris: + description: HCFS URIs of files to be copied to the working + directory of Hadoop drivers and distributed tasks. Useful + for naively parallel tasks. 
+ items: + type: string + type: array + jarFileUris: + description: HCFS URIs of jar files to add to the CLASSPATHs + of the Spark driver and tasks. + items: + type: string + type: array + loggingConfig: + items: + properties: + driverLogLevels: + additionalProperties: + type: string + description: 'The per-package log levels for the driver. + This may include ''root'' package name to configure + rootLogger. Examples: ''com.google = FATAL'', ''root + = INFO'', ''org.apache = DEBUG''' + type: object + required: + - driverLogLevels + type: object + type: array + mainClass: + description: The name of the driver's main class. The jar + file containing the class must be in the default CLASSPATH + or specified in jar_file_uris. Conflicts with main_jar_file_uri + type: string + mainJarFileUri: + description: 'The HCFS URI of the jar file containing the + main class. Examples: ''gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'' + ''hdfs:/tmp/test-samples/custom-wordcount.jar'' ''file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar''. + Conflicts with main_class' + type: string + properties: + additionalProperties: + type: string + description: A mapping of property names to values, used + to configure Hadoop. Properties that conflict with values + set by the Cloud Dataproc API may be overwritten. Can + include properties set in /etc/hadoop/conf/*-site and + classes in user code.. + type: object + type: object + type: array + hiveConfig: + items: + properties: + continueOnFailure: + description: Whether to continue executing queries if a + query fails. The default value is false. Setting to true + can be useful when executing independent parallel queries. + Defaults to false. + type: boolean + jarFileUris: + description: HCFS URIs of jar files to add to the CLASSPATH + of the Hive server and Hadoop MapReduce (MR) tasks. Can + contain Hive SerDes and UDFs. + items: + type: string + type: array + properties: + additionalProperties: + type: string + description: A mapping of property names and values, used + to configure Hive. Properties that conflict with values + set by the Cloud Dataproc API may be overwritten. Can + include properties set in /etc/hadoop/conf/*-site.xml, + /etc/hive/conf/hive-site.xml, and classes in user code.. + type: object + queryFileUri: + description: HCFS URI of file containing Hive script to + execute as the job. Conflicts with query_list + type: string + queryList: + description: The list of Hive queries or statements to execute + as part of the job. Conflicts with query_file_uri + items: + type: string + type: array + scriptVariables: + additionalProperties: + type: string + description: 'Mapping of query variable names to values + (equivalent to the Hive command: SET name="value";).' + type: object + type: object + type: array + labels: + additionalProperties: + type: string + description: The list of labels (key/value pairs) to add to the + job. + type: object + pigConfig: + items: + properties: + continueOnFailure: + description: Whether to continue executing queries if a + query fails. The default value is false. Setting to true + can be useful when executing independent parallel queries. + Defaults to false. + type: boolean + jarFileUris: + description: HCFS URIs of jar files to add to the CLASSPATH + of the Pig Client and Hadoop MapReduce (MR) tasks. Can + contain Pig UDFs. 
+ items: + type: string + type: array + loggingConfig: + items: + properties: + driverLogLevels: + additionalProperties: + type: string + description: 'The per-package log levels for the driver. + This may include ''root'' package name to configure + rootLogger. Examples: ''com.google = FATAL'', ''root + = INFO'', ''org.apache = DEBUG''' + type: object + required: + - driverLogLevels + type: object + type: array + properties: + additionalProperties: + type: string + description: A mapping of property names to values, used + to configure Pig. Properties that conflict with values + set by the Cloud Dataproc API may be overwritten. Can + include properties set in /etc/hadoop/conf/*-site.xml, + /etc/pig/conf/pig.properties, and classes in user code. + type: object + queryFileUri: + description: HCFS URI of file containing Hive script to + execute as the job. Conflicts with query_list + type: string + queryList: + description: The list of Hive queries or statements to execute + as part of the job. Conflicts with query_file_uri + items: + type: string + type: array + scriptVariables: + additionalProperties: + type: string + description: 'Mapping of query variable names to values + (equivalent to the Pig command: name=[value]).' + type: object + type: object + type: array + placement: + items: + properties: + clusterName: + description: The name of the cluster where the job will + be submitted. + type: string + clusterNameRef: + description: Reference to a Cluster in dataproc to populate + clusterName. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution + of this reference is required. The default is + 'Required', which means the reconcile will fail + if the reference cannot be resolved. 'Optional' + means this reference will be a no-op if it cannot + be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference + should be resolved. The default is 'IfNotPresent', + which will attempt to resolve the reference only + when the corresponding field is not present. Use + 'Always' to resolve the reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + clusterNameSelector: + description: Selector for a Cluster in dataproc to populate + clusterName. + properties: + matchControllerRef: + description: MatchControllerRef ensures an object with + the same controller reference as the selecting object + is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching + labels is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution + of this reference is required. The default is + 'Required', which means the reconcile will fail + if the reference cannot be resolved. 'Optional' + means this reference will be a no-op if it cannot + be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference + should be resolved. The default is 'IfNotPresent', + which will attempt to resolve the reference only + when the corresponding field is not present. Use + 'Always' to resolve the reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + type: array + prestoConfig: + items: + properties: + clientTags: + description: Presto client tags to attach to this query. + items: + type: string + type: array + continueOnFailure: + description: Whether to continue executing queries if a + query fails. Setting to true can be useful when executing + independent parallel queries. Defaults to false. + type: boolean + loggingConfig: + items: + properties: + driverLogLevels: + additionalProperties: + type: string + description: 'The per-package log levels for the driver. + This may include ''root'' package name to configure + rootLogger. Examples: ''com.google = FATAL'', ''root + = INFO'', ''org.apache = DEBUG''' + type: object + required: + - driverLogLevels + type: object + type: array + outputFormat: + description: The format in which query output will be displayed. + See the Presto documentation for supported output formats. + type: string + properties: + additionalProperties: + type: string + description: A mapping of property names to values. Used + to set Presto session properties Equivalent to using the + --session flag in the Presto CLI. + type: object + queryFileUri: + description: The HCFS URI of the script that contains SQL + queries. Conflicts with query_list + type: string + queryList: + description: The list of SQL queries or statements to execute + as part of the job. Conflicts with query_file_uri + items: + type: string + type: array + type: object + type: array + project: + description: The project in which the cluster can be found and + jobs subsequently run against. If it is not provided, the provider + project is used. + type: string + pysparkConfig: + items: + properties: + archiveUris: + description: HCFS URIs of archives to be extracted in the + working directory of .jar, .tar, .tar.gz, .tgz, and .zip. + items: + type: string + type: array + args: + description: The arguments to pass to the driver. + items: + type: string + type: array + fileUris: + description: HCFS URIs of files to be copied to the working + directory of Python drivers and distributed tasks. Useful + for naively parallel tasks. + items: + type: string + type: array + jarFileUris: + description: HCFS URIs of jar files to add to the CLASSPATHs + of the Python driver and tasks. + items: + type: string + type: array + loggingConfig: + items: + properties: + driverLogLevels: + additionalProperties: + type: string + description: 'The per-package log levels for the driver. + This may include ''root'' package name to configure + rootLogger. Examples: ''com.google = FATAL'', ''root + = INFO'', ''org.apache = DEBUG''' + type: object + required: + - driverLogLevels + type: object + type: array + mainPythonFileUri: + description: The HCFS URI of the main Python file to use + as the driver. Must be a .py file. + type: string + properties: + additionalProperties: + type: string + description: A mapping of property names to values, used + to configure PySpark. Properties that conflict with values + set by the Cloud Dataproc API may be overwritten. Can + include properties set in /etc/spark/conf/spark-defaults.conf + and classes in user code. + type: object + pythonFileUris: + description: 'HCFS file URIs of Python files to pass to + the PySpark framework. Supported file types: .py, .egg, + and .zip.' 
+ items: + type: string + type: array + required: + - mainPythonFileUri + type: object + type: array + reference: + items: + properties: + jobId: + type: string + type: object + type: array + region: + description: The Cloud Dataproc region. This essentially determines + which clusters are available for this job to be submitted to. + If not specified, defaults to global. + type: string + regionRef: + description: Reference to a Cluster in dataproc to populate region. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + regionSelector: + description: Selector for a Cluster in dataproc to populate region. + properties: + matchControllerRef: + description: MatchControllerRef ensures an object with the + same controller reference as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + scheduling: + items: + properties: + maxFailuresPerHour: + description: Maximum number of times per hour a driver may + be restarted as a result of driver exiting with non-zero + code before job is reported failed. + type: number + maxFailuresTotal: + description: Maximum number of times in total a driver may + be restarted as a result of driver exiting with non-zero + code before job is reported failed. + type: number + required: + - maxFailuresPerHour + - maxFailuresTotal + type: object + type: array + sparkConfig: + items: + properties: + archiveUris: + description: HCFS URIs of archives to be extracted in the + working directory of .jar, .tar, .tar.gz, .tgz, and .zip. + items: + type: string + type: array + args: + description: The arguments to pass to the driver. + items: + type: string + type: array + fileUris: + description: HCFS URIs of files to be copied to the working + directory of Spark drivers and distributed tasks. Useful + for naively parallel tasks. 
+ items: + type: string + type: array + jarFileUris: + description: HCFS URIs of jar files to add to the CLASSPATHs + of the Spark driver and tasks. + items: + type: string + type: array + loggingConfig: + items: + properties: + driverLogLevels: + additionalProperties: + type: string + description: 'The per-package log levels for the driver. + This may include ''root'' package name to configure + rootLogger. Examples: ''com.google = FATAL'', ''root + = INFO'', ''org.apache = DEBUG''' + type: object + required: + - driverLogLevels + type: object + type: array + mainClass: + description: The class containing the main method of the + driver. Must be in a provided jar or jar that is already + on the classpath. Conflicts with main_jar_file_uri + type: string + mainJarFileUri: + description: The HCFS URI of jar file containing the driver + jar. Conflicts with main_class + type: string + properties: + additionalProperties: + type: string + description: A mapping of property names to values, used + to configure Spark. Properties that conflict with values + set by the Cloud Dataproc API may be overwritten. Can + include properties set in /etc/spark/conf/spark-defaults.conf + and classes in user code. + type: object + type: object + type: array + sparksqlConfig: + items: + properties: + jarFileUris: + description: HCFS URIs of jar files to be added to the Spark + CLASSPATH. + items: + type: string + type: array + loggingConfig: + items: + properties: + driverLogLevels: + additionalProperties: + type: string + description: 'The per-package log levels for the driver. + This may include ''root'' package name to configure + rootLogger. Examples: ''com.google = FATAL'', ''root + = INFO'', ''org.apache = DEBUG''' + type: object + required: + - driverLogLevels + type: object + type: array + properties: + additionalProperties: + type: string + description: A mapping of property names to values, used + to configure Spark SQL's SparkConf. Properties that conflict + with values set by the Cloud Dataproc API may be overwritten. + type: object + queryFileUri: + description: The HCFS URI of the script that contains SQL + queries. Conflicts with query_list + type: string + queryList: + description: The list of SQL queries or statements to execute + as part of the job. Conflicts with query_file_uri + items: + type: string + type: array + scriptVariables: + additionalProperties: + type: string + description: 'Mapping of query variable names to values + (equivalent to the Spark SQL command: SET name="value";).' + type: object + type: object + type: array + required: + - placement + type: object + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. 
Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. 
+ type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: JobStatus defines the observed state of Job. + properties: + atProvider: + properties: + driverControlsFilesUri: + description: If present, the location of miscellaneous control + files which may be used as part of job setup and handling. If + not present, control files may be placed in the same location + as driver_output_uri. + type: string + driverOutputResourceUri: + description: A URI pointing to the location of the stdout of the + job's driver program. + type: string + id: + type: string + placement: + items: + properties: + clusterUuid: + description: A cluster UUID generated by the Cloud Dataproc + service when the job is submitted. + type: string + type: object + type: array + status: + items: + properties: + details: + description: Optional job state details, such as an error + description if the state is ERROR. + type: string + state: + description: A state message specifying the overall job + state. + type: string + stateStartTime: + description: The time when this state was entered. + type: string + substate: + description: Additional state information, which includes + status reported by the agent. + type: string + type: object + type: array + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. 
+ type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/package/crds/dataproc.gcp.upbound.io_workflowtemplates.yaml b/package/crds/dataproc.gcp.upbound.io_workflowtemplates.yaml new file mode 100644 index 000000000..ff7d2d1f8 --- /dev/null +++ b/package/crds/dataproc.gcp.upbound.io_workflowtemplates.yaml @@ -0,0 +1,1976 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: workflowtemplates.dataproc.gcp.upbound.io +spec: + group: dataproc.gcp.upbound.io + names: + categories: + - crossplane + - managed + - gcp + kind: WorkflowTemplate + listKind: WorkflowTemplateList + plural: workflowtemplates + singular: workflowtemplate + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: WorkflowTemplate is the Schema for the WorkflowTemplates API. + A Workflow Template is a reusable workflow configuration. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: WorkflowTemplateSpec defines the desired state of WorkflowTemplate + properties: + deletionPolicy: + default: Delete + description: DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + dagTimeout: + description: (Beta only) Optional. Timeout duration for the DAG + of jobs. You can use "s", "m", "h", and "d" suffixes for second, + minute, hour, and day duration values, respectively. The timeout + duration must be from 10 minutes ("10m") to 24 hours ("24h" + or "1d"). The timer begins when the first job is submitted. + If the workflow is running at the end of the timeout period, + any remaining jobs are cancelled, the workflow is ended, and + if the workflow was running on a (/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), + the cluster is deleted. + type: string + jobs: + description: Required. The Directed Acyclic Graph of Jobs to submit. + items: + properties: + hadoopJob: + description: Optional. Job is a Hadoop job. 
+ items: + properties: + archiveUris: + description: 'Optional. HCFS URIs of archives to be + extracted into the working directory of each executor. + Supported file types: .jar, .tar, .tar.gz, .tgz, + and .zip.' + items: + type: string + type: array + args: + description: Optional. The arguments to pass to the + driver. Do not include arguments, such as --conf, + that can be set as job properties, since a collision + may occur that causes an incorrect job submission. + items: + type: string + type: array + fileUris: + description: Optional. HCFS URIs of files to be placed + in the working directory of each executor. Useful + for naively parallel tasks. + items: + type: string + type: array + jarFileUris: + description: Optional. HCFS URIs of jar files to be + added to the Spark CLASSPATH. + items: + type: string + type: array + loggingConfig: + description: Optional. The runtime log config for + job execution. + items: + properties: + driverLogLevels: + additionalProperties: + type: string + description: 'The per-package log levels for + the driver. This may include "root" package + name to configure rootLogger. Examples: ''com.google + = FATAL'', ''root = INFO'', ''org.apache = + DEBUG''' + type: object + type: object + type: array + mainClass: + description: The name of the driver's main class. + The jar file that contains the class must be in + the default CLASSPATH or specified in jar_file_uris. + type: string + mainJarFileUri: + description: The HCFS URI of the jar file that contains + the main class. + type: string + properties: + additionalProperties: + type: string + description: Optional. A mapping of property names + to values, used to configure Spark SQL's SparkConf. + Properties that conflict with values set by the + Dataproc API may be overwritten. + type: object + type: object + type: array + hiveJob: + description: Optional. Job is a Hive job. + items: + properties: + continueOnFailure: + description: Optional. Whether to continue executing + queries if a query fails. The default value is false. + Setting to true can be useful when executing independent + parallel queries. + type: boolean + jarFileUris: + description: Optional. HCFS URIs of jar files to be + added to the Spark CLASSPATH. + items: + type: string + type: array + properties: + additionalProperties: + type: string + description: Optional. A mapping of property names + to values, used to configure Spark SQL's SparkConf. + Properties that conflict with values set by the + Dataproc API may be overwritten. + type: object + queryFileUri: + description: The HCFS URI of the script that contains + SQL queries. + type: string + queryList: + description: A list of queries. + items: + properties: + queries: + description: 'Required. The queries to execute. + You do not need to end a query expression + with a semicolon. Multiple queries can be + specified in one string by separating each + with a semicolon. Here is an example of a + Dataproc API snippet that uses a QueryList + to specify a HiveJob: "hiveJob": { "queryList": + { "queries": } }' + items: + type: string + type: array + required: + - queries + type: object + type: array + scriptVariables: + additionalProperties: + type: string + description: 'Optional. Mapping of query variable + names to values (equivalent to the Spark SQL command: + SET name="value";).' + type: object + type: object + type: array + labels: + additionalProperties: + type: string + description: 'Optional. The labels to associate with this + job. 
Label keys must be between 1 and 63 characters long, + and must conform to the following regular expression: + {0,63} No more than 32 labels can be associated with a + given job.' + type: object + pigJob: + description: Optional. Job is a Pig job. + items: + properties: + continueOnFailure: + description: Optional. Whether to continue executing + queries if a query fails. The default value is false. + Setting to true can be useful when executing independent + parallel queries. + type: boolean + jarFileUris: + description: Optional. HCFS URIs of jar files to be + added to the Spark CLASSPATH. + items: + type: string + type: array + loggingConfig: + description: Optional. The runtime log config for + job execution. + items: + properties: + driverLogLevels: + additionalProperties: + type: string + description: 'The per-package log levels for + the driver. This may include "root" package + name to configure rootLogger. Examples: ''com.google + = FATAL'', ''root = INFO'', ''org.apache = + DEBUG''' + type: object + type: object + type: array + properties: + additionalProperties: + type: string + description: Optional. A mapping of property names + to values, used to configure Spark SQL's SparkConf. + Properties that conflict with values set by the + Dataproc API may be overwritten. + type: object + queryFileUri: + description: The HCFS URI of the script that contains + SQL queries. + type: string + queryList: + description: A list of queries. + items: + properties: + queries: + description: 'Required. The queries to execute. + You do not need to end a query expression + with a semicolon. Multiple queries can be + specified in one string by separating each + with a semicolon. Here is an example of a + Dataproc API snippet that uses a QueryList + to specify a HiveJob: "hiveJob": { "queryList": + { "queries": } }' + items: + type: string + type: array + required: + - queries + type: object + type: array + scriptVariables: + additionalProperties: + type: string + description: 'Optional. Mapping of query variable + names to values (equivalent to the Spark SQL command: + SET name="value";).' + type: object + type: object + type: array + prerequisiteStepIds: + description: Optional. The optional list of prerequisite + job step_ids. If not specified, the job will start at + the beginning of workflow. + items: + type: string + type: array + prestoJob: + description: Optional. Job is a Presto job. + items: + properties: + clientTags: + description: Optional. Presto client tags to attach + to this query + items: + type: string + type: array + continueOnFailure: + description: Optional. Whether to continue executing + queries if a query fails. The default value is false. + Setting to true can be useful when executing independent + parallel queries. + type: boolean + loggingConfig: + description: Optional. The runtime log config for + job execution. + items: + properties: + driverLogLevels: + additionalProperties: + type: string + description: 'The per-package log levels for + the driver. This may include "root" package + name to configure rootLogger. Examples: ''com.google + = FATAL'', ''root = INFO'', ''org.apache = + DEBUG''' + type: object + type: object + type: array + outputFormat: + description: Optional. The format in which query output + will be displayed. See the Presto documentation + for supported output formats + type: string + properties: + additionalProperties: + type: string + description: Optional. A mapping of property names + to values, used to configure Spark SQL's SparkConf. 
+ Properties that conflict with values set by the + Dataproc API may be overwritten. + type: object + queryFileUri: + description: The HCFS URI of the script that contains + SQL queries. + type: string + queryList: + description: A list of queries. + items: + properties: + queries: + description: 'Required. The queries to execute. + You do not need to end a query expression + with a semicolon. Multiple queries can be + specified in one string by separating each + with a semicolon. Here is an example of a + Dataproc API snippet that uses a QueryList + to specify a HiveJob: "hiveJob": { "queryList": + { "queries": } }' + items: + type: string + type: array + required: + - queries + type: object + type: array + type: object + type: array + pysparkJob: + description: Optional. Job is a PySpark job. + items: + properties: + archiveUris: + description: 'Optional. HCFS URIs of archives to be + extracted into the working directory of each executor. + Supported file types: .jar, .tar, .tar.gz, .tgz, + and .zip.' + items: + type: string + type: array + args: + description: Optional. The arguments to pass to the + driver. Do not include arguments, such as --conf, + that can be set as job properties, since a collision + may occur that causes an incorrect job submission. + items: + type: string + type: array + fileUris: + description: Optional. HCFS URIs of files to be placed + in the working directory of each executor. Useful + for naively parallel tasks. + items: + type: string + type: array + jarFileUris: + description: Optional. HCFS URIs of jar files to be + added to the Spark CLASSPATH. + items: + type: string + type: array + loggingConfig: + description: Optional. The runtime log config for + job execution. + items: + properties: + driverLogLevels: + additionalProperties: + type: string + description: 'The per-package log levels for + the driver. This may include "root" package + name to configure rootLogger. Examples: ''com.google + = FATAL'', ''root = INFO'', ''org.apache = + DEBUG''' + type: object + type: object + type: array + mainPythonFileUri: + description: Required. The HCFS URI of the main Python + file to use as the driver. Must be a .py file. + type: string + properties: + additionalProperties: + type: string + description: Optional. A mapping of property names + to values, used to configure Spark SQL's SparkConf. + Properties that conflict with values set by the + Dataproc API may be overwritten. + type: object + pythonFileUris: + description: 'Optional. HCFS file URIs of Python files + to pass to the PySpark framework. Supported file + types: .py, .egg, and .zip.' + items: + type: string + type: array + required: + - mainPythonFileUri + type: object + type: array + scheduling: + description: Optional. Job scheduling configuration. + items: + properties: + maxFailuresPerHour: + description: Optional. Maximum number of times per + hour a driver may be restarted as a result of driver + exiting with non-zero code before job is reported + failed. A job may be reported as thrashing if driver + exits with non-zero code 4 times within 10 minute + window. Maximum value is 10. + type: number + maxFailuresTotal: + description: Optional. Maximum number of times in + total a driver may be restarted as a result of driver + exiting with non-zero code before job is reported + failed. Maximum value is 240 + type: number + type: object + type: array + sparkJob: + description: Optional. Job is a Spark job. + items: + properties: + archiveUris: + description: 'Optional. 
HCFS URIs of archives to be + extracted into the working directory of each executor. + Supported file types: .jar, .tar, .tar.gz, .tgz, + and .zip.' + items: + type: string + type: array + args: + description: Optional. The arguments to pass to the + driver. Do not include arguments, such as --conf, + that can be set as job properties, since a collision + may occur that causes an incorrect job submission. + items: + type: string + type: array + fileUris: + description: Optional. HCFS URIs of files to be placed + in the working directory of each executor. Useful + for naively parallel tasks. + items: + type: string + type: array + jarFileUris: + description: Optional. HCFS URIs of jar files to be + added to the Spark CLASSPATH. + items: + type: string + type: array + loggingConfig: + description: Optional. The runtime log config for + job execution. + items: + properties: + driverLogLevels: + additionalProperties: + type: string + description: 'The per-package log levels for + the driver. This may include "root" package + name to configure rootLogger. Examples: ''com.google + = FATAL'', ''root = INFO'', ''org.apache = + DEBUG''' + type: object + type: object + type: array + mainClass: + description: The name of the driver's main class. + The jar file that contains the class must be in + the default CLASSPATH or specified in jar_file_uris. + type: string + mainJarFileUri: + description: The HCFS URI of the jar file that contains + the main class. + type: string + properties: + additionalProperties: + type: string + description: Optional. A mapping of property names + to values, used to configure Spark SQL's SparkConf. + Properties that conflict with values set by the + Dataproc API may be overwritten. + type: object + type: object + type: array + sparkRJob: + description: Optional. Job is a SparkR job. + items: + properties: + archiveUris: + description: 'Optional. HCFS URIs of archives to be + extracted into the working directory of each executor. + Supported file types: .jar, .tar, .tar.gz, .tgz, + and .zip.' + items: + type: string + type: array + args: + description: Optional. The arguments to pass to the + driver. Do not include arguments, such as --conf, + that can be set as job properties, since a collision + may occur that causes an incorrect job submission. + items: + type: string + type: array + fileUris: + description: Optional. HCFS URIs of files to be placed + in the working directory of each executor. Useful + for naively parallel tasks. + items: + type: string + type: array + loggingConfig: + description: Optional. The runtime log config for + job execution. + items: + properties: + driverLogLevels: + additionalProperties: + type: string + description: 'The per-package log levels for + the driver. This may include "root" package + name to configure rootLogger. Examples: ''com.google + = FATAL'', ''root = INFO'', ''org.apache = + DEBUG''' + type: object + type: object + type: array + mainRFileUri: + description: Required. The HCFS URI of the main R + file to use as the driver. Must be a .R file. + type: string + properties: + additionalProperties: + type: string + description: Optional. A mapping of property names + to values, used to configure Spark SQL's SparkConf. + Properties that conflict with values set by the + Dataproc API may be overwritten. + type: object + required: + - mainRFileUri + type: object + type: array + sparkSqlJob: + description: Optional. Job is a SparkSql job. + items: + properties: + jarFileUris: + description: Optional. 
HCFS URIs of jar files to be + added to the Spark CLASSPATH. + items: + type: string + type: array + loggingConfig: + description: Optional. The runtime log config for + job execution. + items: + properties: + driverLogLevels: + additionalProperties: + type: string + description: 'The per-package log levels for + the driver. This may include "root" package + name to configure rootLogger. Examples: ''com.google + = FATAL'', ''root = INFO'', ''org.apache = + DEBUG''' + type: object + type: object + type: array + properties: + additionalProperties: + type: string + description: Optional. A mapping of property names + to values, used to configure Spark SQL's SparkConf. + Properties that conflict with values set by the + Dataproc API may be overwritten. + type: object + queryFileUri: + description: The HCFS URI of the script that contains + SQL queries. + type: string + queryList: + description: A list of queries. + items: + properties: + queries: + description: 'Required. The queries to execute. + You do not need to end a query expression + with a semicolon. Multiple queries can be + specified in one string by separating each + with a semicolon. Here is an example of a + Dataproc API snippet that uses a QueryList + to specify a HiveJob: "hiveJob": { "queryList": + { "queries": } }' + items: + type: string + type: array + required: + - queries + type: object + type: array + scriptVariables: + additionalProperties: + type: string + description: 'Optional. Mapping of query variable + names to values (equivalent to the Spark SQL command: + SET name="value";).' + type: object + type: object + type: array + stepId: + description: Required. The step id. The id must be unique + among all jobs within the template. The step id is used + as prefix for job id, as job goog-dataproc-workflow-step-id + label, and in field from other steps. The id must contain + only letters (a-z, A-Z), numbers (0-9), underscores (_), + and hyphens (-). Cannot begin or end with underscore or + hyphen. Must consist of between 3 and 50 characters. + type: string + required: + - stepId + type: object + type: array + labels: + additionalProperties: + type: string + description: 'Optional. The labels to associate with this cluster. + Label keys must be between 1 and 63 characters long, and must + conform to the following PCRE regular expression: {0,63} No + more than 32 labels can be associated with a given cluster.' + type: object + location: + description: The location for the resource + type: string + parameters: + description: Optional. Template parameters whose values are substituted + into the template. Values for parameters must be provided when + the template is instantiated. + items: + properties: + description: + description: Optional. Brief description of the parameter. + Must not exceed 1024 characters. + type: string + fields: + description: Required. Paths to all fields that the parameter + replaces. A field is allowed to appear in at most one + parameter's list of field paths. A field path is similar + in syntax to a .sparkJob.args + items: + type: string + type: array + name: + description: Required. Parameter name. The parameter name + is used as the key, and paired with the parameter value, + which are passed to the template when the template is + instantiated. The name must contain only capital letters + (A-Z), numbers (0-9), and underscores (_), and must not + start with a number. The maximum length is 40 characters. + type: string + validation: + description: Optional. 
Validation rules to be applied to + this parameter's value. + items: + properties: + regex: + description: Validation based on regular expressions. + items: + properties: + regexes: + description: Required. RE2 regular expressions + used to validate the parameter's value. The + value must match the regex in its entirety + (substring matches are not sufficient). + items: + type: string + type: array + required: + - regexes + type: object + type: array + values: + description: Required. List of allowed values for + the parameter. + items: + properties: + values: + description: Required. List of allowed values + for the parameter. + items: + type: string + type: array + required: + - values + type: object + type: array + type: object + type: array + required: + - fields + - name + type: object + type: array + placement: + description: Required. WorkflowTemplate scheduling information. + items: + properties: + clusterSelector: + description: Optional. A selector that chooses target cluster + for jobs based on metadata. The selector is evaluated + at the time each job is submitted. + items: + properties: + clusterLabels: + additionalProperties: + type: string + description: Required. The cluster labels. Cluster + must have all labels to match. + type: object + zone: + description: 'Optional. The zone where the Compute + Engine cluster will be located. On a create request, + it is required in the "global" region. If omitted + in a non-global Dataproc region, the service will + pick a zone in the corresponding Compute Engine + region. On a get request, zone will always be present. + A full URL, partial URI, or short name are valid. + Examples: * https://www.googleapis.com/compute/v1/projects/ + * us-central1-f' + type: string + required: + - clusterLabels + type: object + type: array + managedCluster: + description: A cluster that is managed by the workflow. + items: + properties: + clusterName: + description: Required. The cluster name prefix. A + unique cluster name will be formed by appending + a random suffix. The name must contain only lower-case + letters (a-z), numbers (0-9), and hyphens (-). Must + begin with a letter. Cannot begin or end with hyphen. + Must consist of between 2 and 35 characters. + type: string + config: + description: Required. The cluster configuration. + items: + properties: + autoscalingConfig: + description: Optional. Autoscaling config for + the policy associated with the cluster. Cluster + does not autoscale if this field is unset. + items: + properties: + policy: + description: 'Optional. The autoscaling + policy used by the cluster. Only resource + names including projectid and location + (region) are valid. Examples: * https://www.googleapis.com/compute/v1/projects/ + Note that the policy must be in the + same project and Dataproc region.' + type: string + type: object + type: array + encryptionConfig: + description: Optional. Encryption settings for + the cluster. + items: + properties: + gcePdKmsKeyName: + description: Optional. The Cloud KMS key + name to use for PD disk encryption for + all instances in the cluster. + type: string + type: object + type: array + endpointConfig: + description: Optional. Port/endpoint configuration + for this cluster + items: + properties: + enableHttpPortAccess: + description: Optional. If true, enable + http access to specific ports on the + cluster from external sources. Defaults + to false. + type: boolean + type: object + type: array + gceClusterConfig: + description: Optional. 
The shared Compute Engine + config settings for all instances in a cluster. + items: + properties: + internalIpOnly: + description: Optional. If true, all instances + in the cluster will only have internal + IP addresses. By default, clusters are + not restricted to internal IP addresses, + and will have ephemeral external IP + addresses assigned to each instance. + This internal_ip_only restriction can + only be enabled for subnetwork enabled + networks, and all off-cluster dependencies + must be configured to be accessible + without external IP addresses. + type: boolean + metadata: + additionalProperties: + type: string + description: The Compute Engine metadata + entries to add to all instances (see + (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + type: object + network: + description: Optional. The Compute Engine + network to be used for machine communications. + Cannot be specified with subnetwork_uri. + If neither network_uri nor subnetwork_uri + is specified, the "default" network + of the project is used, if it exists. + Cannot be a "Custom Subnet Network" + (see /regions/global/default*default` + type: string + nodeGroupAffinity: + description: Optional. Node Group Affinity + for sole-tenant clusters. + items: + properties: + nodeGroup: + description: Required. The URI of + a sole-tenant /zones/us-central1-a/nodeGroups/node-group-1*node-group-1` + type: string + required: + - nodeGroup + type: object + type: array + privateIpv6GoogleAccess: + description: 'Optional. The type of IPv6 + access for a cluster. Possible values: + PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, + INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL' + type: string + reservationAffinity: + description: Optional. Reservation Affinity + for consuming Zonal reservation. + items: + properties: + consumeReservationType: + description: 'Optional. Type of + reservation to consume Possible + values: TYPE_UNSPECIFIED, NO_RESERVATION, + ANY_RESERVATION, SPECIFIC_RESERVATION' + type: string + key: + description: Optional. Corresponds + to the label key of reservation + resource. + type: string + values: + description: Required. List of allowed + values for the parameter. + items: + type: string + type: array + type: object + type: array + serviceAccount: + description: Optional. The (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) + is used. + type: string + serviceAccountScopes: + description: 'Optional. The URIs of service + account scopes to be included in Compute + Engine instances. The following base + set of scopes is always included: * + https://www.googleapis.com/auth/cloud.useraccounts.readonly + * https://www.googleapis.com/auth/devstorage.read_write + * https://www.googleapis.com/auth/logging.write + If no scopes are specified, the following + defaults are also provided: * https://www.googleapis.com/auth/bigquery + * https://www.googleapis.com/auth/bigtable.admin.table + * https://www.googleapis.com/auth/bigtable.data + * https://www.googleapis.com/auth/devstorage.full_control' + items: + type: string + type: array + shieldedInstanceConfig: + description: Optional. Shielded Instance + Config for clusters using Compute Engine + Shielded VMs. Structure defined below. + items: + properties: + enableIntegrityMonitoring: + description: Optional. Defines whether + instances have Integrity Monitoring + enabled. + type: boolean + enableSecureBoot: + description: Optional. Defines whether + instances have Secure Boot enabled. 
+ type: boolean + enableVtpm: + description: Optional. Defines whether + instances have the vTPM enabled. + type: boolean + type: object + type: array + subnetwork: + description: 'Optional. The Compute Engine + subnetwork to be used for machine communications. + Cannot be specified with network_uri. + A full URL, partial URI, or short name + are valid. Examples: * https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0 + * sub0' + type: string + tags: + description: The Compute Engine tags to + add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + items: + type: string + type: array + zone: + description: 'Optional. The zone where + the Compute Engine cluster will be located. + On a create request, it is required + in the "global" region. If omitted in + a non-global Dataproc region, the service + will pick a zone in the corresponding + Compute Engine region. On a get request, + zone will always be present. A full + URL, partial URI, or short name are + valid. Examples: * https://www.googleapis.com/compute/v1/projects/ + * us-central1-f' + type: string + type: object + type: array + initializationActions: + description: 'Optional. Commands to execute + on each node after config is completed. By + default, executables are run on master and + all worker nodes. You can test a node''s role + metadata to run an executable on a master + or worker node, as shown below using curl + (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google + http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + if ; then ... master specific actions ... + else ... worker specific actions ... fi' + items: + properties: + executableFile: + description: Required. Cloud Storage URI + of executable file. + type: string + executionTimeout: + description: Optional. Amount of time + executable has to complete. Default + is 10 minutes (see JSON representation + of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + Cluster creation fails with an explanatory + error message (the name of the executable + that caused the error and the exceeded + timeout period) if the executable is + not completed at end of the timeout + period. + type: string + type: object + type: array + lifecycleConfig: + description: Optional. Lifecycle setting for + the cluster. + items: + properties: + autoDeleteTime: + description: Optional. The time when cluster + will be auto-deleted (see JSON representation + of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + type: string + autoDeleteTtl: + description: Optional. The lifetime duration + of cluster. The cluster will be auto-deleted + at the end of this period. Minimum value + is 10 minutes; maximum value is 14 days + (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + type: string + idleDeleteTtl: + description: Optional. The duration to + keep the cluster alive while idling + (when no jobs are running). Passing + this threshold will cause the cluster + to be deleted. Minimum value is 5 minutes; + maximum value is 14 days (see JSON representation + of (https://developers.google.com/protocol-buffers/docs/proto3#json). + type: string + type: object + type: array + masterConfig: + description: Optional. The Compute Engine config + settings for additional worker instances in + a cluster. + items: + properties: + accelerators: + description: Optional. The Compute Engine + accelerator configuration for these + instances. 
+ items: + properties: + acceleratorCount: + description: The number of the accelerator + cards of this type exposed to + this instance. + type: number + acceleratorType: + description: Full URL, partial URI, + or short name of the accelerator + type resource to expose to this + instance. See (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + feature, you must use the short + name of the accelerator type resource, + for example, nvidia-tesla-k80. + type: string + type: object + type: array + diskConfig: + description: Optional. Disk option config + settings. + items: + properties: + bootDiskSizeGb: + description: Optional. Size in GB + of the boot disk (default is 500GB). + type: number + bootDiskType: + description: 'Optional. Type of + the boot disk (default is "pd-standard"). + Valid values: "pd-ssd" (Persistent + Disk Solid State Drive) or "pd-standard" + (Persistent Disk Hard Disk Drive).' + type: string + numLocalSsds: + description: Optional. Number of + attached SSDs, from 0 to 4 (default + is 0). If SSDs are not attached, + the boot disk is used to store + runtime logs and (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) + data. If one or more SSDs are + attached, this runtime bulk data + is spread across them, and the + boot disk contains only basic + config and installed binaries. + type: number + type: object + type: array + image: + description: 'Optional. The Compute Engine + image resource used for cluster instances. + The URI can represent an image or image + family. Image examples: * https://www.googleapis.com/compute/beta/projects/ + If the URI is unspecified, it will be + inferred from SoftwareConfig.image_version + or the system default.' + type: string + machineType: + description: 'Optional. The Compute Engine + machine type used for cluster instances. + A full URL, partial URI, or short name + are valid. Examples: * https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + feature, you must use the short name + of the machine type resource, for example, + n1-standard-2`.' + type: string + minCpuPlatform: + description: Optional. Specifies the minimum + cpu platform for the Instance Group. + See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + type: string + numInstances: + description: Optional. The number of VM + instances in the instance group. For + master instance groups, must be set + to 1. + type: number + preemptibility: + description: 'Optional. Specifies the + preemptibility of the instance group. + The default value for master and worker + groups is NON_PREEMPTIBLE. This default + cannot be changed. The default value + for secondary instances is PREEMPTIBLE. + Possible values: PREEMPTIBILITY_UNSPECIFIED, + NON_PREEMPTIBLE, PREEMPTIBLE' + type: string + type: object + type: array + secondaryWorkerConfig: + description: Optional. The Compute Engine config + settings for additional worker instances in + a cluster. + items: + properties: + accelerators: + description: Optional. The Compute Engine + accelerator configuration for these + instances. + items: + properties: + acceleratorCount: + description: The number of the accelerator + cards of this type exposed to + this instance. + type: number + acceleratorType: + description: Full URL, partial URI, + or short name of the accelerator + type resource to expose to this + instance. 
See (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + feature, you must use the short + name of the accelerator type resource, + for example, nvidia-tesla-k80. + type: string + type: object + type: array + diskConfig: + description: Optional. Disk option config + settings. + items: + properties: + bootDiskSizeGb: + description: Optional. Size in GB + of the boot disk (default is 500GB). + type: number + bootDiskType: + description: 'Optional. Type of + the boot disk (default is "pd-standard"). + Valid values: "pd-ssd" (Persistent + Disk Solid State Drive) or "pd-standard" + (Persistent Disk Hard Disk Drive).' + type: string + numLocalSsds: + description: Optional. Number of + attached SSDs, from 0 to 4 (default + is 0). If SSDs are not attached, + the boot disk is used to store + runtime logs and (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) + data. If one or more SSDs are + attached, this runtime bulk data + is spread across them, and the + boot disk contains only basic + config and installed binaries. + type: number + type: object + type: array + image: + description: 'Optional. The Compute Engine + image resource used for cluster instances. + The URI can represent an image or image + family. Image examples: * https://www.googleapis.com/compute/beta/projects/ + If the URI is unspecified, it will be + inferred from SoftwareConfig.image_version + or the system default.' + type: string + machineType: + description: 'Optional. The Compute Engine + machine type used for cluster instances. + A full URL, partial URI, or short name + are valid. Examples: * https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + feature, you must use the short name + of the machine type resource, for example, + n1-standard-2`.' + type: string + minCpuPlatform: + description: Optional. Specifies the minimum + cpu platform for the Instance Group. + See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + type: string + numInstances: + description: Optional. The number of VM + instances in the instance group. For + master instance groups, must be set + to 1. + type: number + preemptibility: + description: 'Optional. Specifies the + preemptibility of the instance group. + The default value for master and worker + groups is NON_PREEMPTIBLE. This default + cannot be changed. The default value + for secondary instances is PREEMPTIBLE. + Possible values: PREEMPTIBILITY_UNSPECIFIED, + NON_PREEMPTIBLE, PREEMPTIBLE' + type: string + type: object + type: array + securityConfig: + description: Optional. Security settings for + the cluster. + items: + properties: + kerberosConfig: + description: Kerberos related configuration. + items: + properties: + crossRealmTrustAdminServer: + description: Optional. The admin + server (IP or hostname) for the + remote trusted realm in a cross + realm trust relationship. + type: string + crossRealmTrustKdc: + description: Optional. The KDC (IP + or hostname) for the remote trusted + realm in a cross realm trust relationship. + type: string + crossRealmTrustRealm: + description: Optional. The remote + realm the Dataproc on-cluster + KDC will trust, should the user + enable cross realm trust. + type: string + crossRealmTrustSharedPassword: + description: Optional. 
The Cloud + Storage URI of a KMS encrypted + file containing the shared password + between the on-cluster Kerberos + realm and the remote trusted realm, + in a cross realm trust relationship. + type: string + enableKerberos: + description: 'Optional. Flag to + indicate whether to Kerberize + the cluster (default: false). + Set this field to true to enable + Kerberos on a cluster.' + type: boolean + kdcDbKey: + description: Optional. The Cloud + Storage URI of a KMS encrypted + file containing the master key + of the KDC database. + type: string + keyPassword: + description: Optional. The Cloud + Storage URI of a KMS encrypted + file containing the password to + the user provided key. For the + self-signed certificate, this + password is generated by Dataproc. + type: string + keystore: + description: Optional. The Cloud + Storage URI of the keystore file + used for SSL encryption. If not + provided, Dataproc will provide + a self-signed certificate. + type: string + keystorePassword: + description: Optional. The Cloud + Storage URI of a KMS encrypted + file containing the password to + the user provided keystore. For + the self-signed certificate, this + password is generated by Dataproc. + type: string + kmsKey: + description: Optional. The uri of + the KMS key used to encrypt various + sensitive files. + type: string + realm: + description: Optional. The name + of the on-cluster Kerberos realm. + If not specified, the uppercased + domain of hostnames will be the + realm. + type: string + rootPrincipalPassword: + description: Optional. The Cloud + Storage URI of a KMS encrypted + file containing the root principal + password. + type: string + tgtLifetimeHours: + description: Optional. The lifetime + of the ticket granting ticket, + in hours. If not specified, or + user specifies 0, then default + value 10 will be used. + type: number + truststore: + description: Optional. The Cloud + Storage URI of the truststore + file used for SSL encryption. + If not provided, Dataproc will + provide a self-signed certificate. + type: string + truststorePassword: + description: Optional. The Cloud + Storage URI of a KMS encrypted + file containing the password to + the user provided truststore. + For the self-signed certificate, + this password is generated by + Dataproc. + type: string + type: object + type: array + type: object + type: array + softwareConfig: + description: Optional. The config settings for + software inside the cluster. + items: + properties: + imageVersion: + description: Optional. The version of + software inside the cluster. It must + be one of the supported (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). + If unspecified, it defaults to the latest + Debian version. + type: string + optionalComponents: + items: + type: string + type: array + properties: + additionalProperties: + type: string + description: Optional. A mapping of property + names to values, used to configure Spark + SQL's SparkConf. Properties that conflict + with values set by the Dataproc API + may be overwritten. + type: object + type: object + type: array + stagingBucket: + description: Optional. A Cloud Storage bucket + used to stage job dependencies, config files, + and job driver console output. 
If you do not + specify a staging bucket, Cloud Dataproc will + determine a Cloud Storage location (US, ASIA, + or EU) for your cluster's staging bucket according + to the Compute Engine zone where your cluster + is deployed, and then create and manage this + project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + type: string + tempBucket: + description: Optional. A Cloud Storage bucket + used to store ephemeral cluster and jobs data, + such as Spark and MapReduce history files. + If you do not specify a temp bucket, Dataproc + will determine a Cloud Storage location (US, + ASIA, or EU) for your cluster's temp bucket + according to the Compute Engine zone where + your cluster is deployed, and then create + and manage this project-level, per-location + bucket. The default bucket has a TTL of 90 + days, but you can use any TTL (or none) if + you specify a bucket. + type: string + workerConfig: + description: Optional. The Compute Engine config + settings for additional worker instances in + a cluster. + items: + properties: + accelerators: + description: Optional. The Compute Engine + accelerator configuration for these + instances. + items: + properties: + acceleratorCount: + description: The number of the accelerator + cards of this type exposed to + this instance. + type: number + acceleratorType: + description: Full URL, partial URI, + or short name of the accelerator + type resource to expose to this + instance. See (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + feature, you must use the short + name of the accelerator type resource, + for example, nvidia-tesla-k80. + type: string + type: object + type: array + diskConfig: + description: Optional. Disk option config + settings. + items: + properties: + bootDiskSizeGb: + description: Optional. Size in GB + of the boot disk (default is 500GB). + type: number + bootDiskType: + description: 'Optional. Type of + the boot disk (default is "pd-standard"). + Valid values: "pd-ssd" (Persistent + Disk Solid State Drive) or "pd-standard" + (Persistent Disk Hard Disk Drive).' + type: string + numLocalSsds: + description: Optional. Number of + attached SSDs, from 0 to 4 (default + is 0). If SSDs are not attached, + the boot disk is used to store + runtime logs and (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) + data. If one or more SSDs are + attached, this runtime bulk data + is spread across them, and the + boot disk contains only basic + config and installed binaries. + type: number + type: object + type: array + image: + description: 'Optional. The Compute Engine + image resource used for cluster instances. + The URI can represent an image or image + family. Image examples: * https://www.googleapis.com/compute/beta/projects/ + If the URI is unspecified, it will be + inferred from SoftwareConfig.image_version + or the system default.' + type: string + machineType: + description: 'Optional. The Compute Engine + machine type used for cluster instances. + A full URL, partial URI, or short name + are valid. Examples: * https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + feature, you must use the short name + of the machine type resource, for example, + n1-standard-2`.' + type: string + minCpuPlatform: + description: Optional. Specifies the minimum + cpu platform for the Instance Group. 
+ See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + type: string + numInstances: + description: Optional. The number of VM + instances in the instance group. For + master instance groups, must be set + to 1. + type: number + preemptibility: + description: 'Optional. Specifies the + preemptibility of the instance group. + The default value for master and worker + groups is NON_PREEMPTIBLE. This default + cannot be changed. The default value + for secondary instances is PREEMPTIBLE. + Possible values: PREEMPTIBILITY_UNSPECIFIED, + NON_PREEMPTIBLE, PREEMPTIBLE' + type: string + type: object + type: array + type: object + type: array + labels: + additionalProperties: + type: string + description: 'Optional. The labels to associate with + this cluster. Label keys must be between 1 and 63 + characters long, and must conform to the following + PCRE regular expression: {0,63} No more than 32 + labels can be associated with a given cluster.' + type: object + required: + - clusterName + - config + type: object + type: array + type: object + type: array + project: + description: The project for the resource + type: string + version: + description: Optional. Used to perform a consistent read-modify-write. + This field should be left blank for a CreateWorkflowTemplate + request. It is required for an UpdateWorkflowTemplate request, + and must match the current server version. A typical update + template flow would fetch the current template with a GetWorkflowTemplate + request, which will return the current template with the version + field filled in with the current server version. The user updates + other fields in the template, then returns it as part of the + UpdateWorkflowTemplate request. + type: number + required: + - jobs + - location + - placement + type: object + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 
'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: WorkflowTemplateStatus defines the observed state of WorkflowTemplate. + properties: + atProvider: + properties: + createTime: + description: Output only. The time template was created. + type: string + id: + description: an identifier for the resource with format projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}} + type: string + placement: + description: Required. WorkflowTemplate scheduling information. + items: + properties: + managedCluster: + description: A cluster that is managed by the workflow. + items: + properties: + config: + description: Required. The cluster configuration. + items: + properties: + endpointConfig: + description: Optional. Port/endpoint configuration + for this cluster + items: + properties: + httpPorts: + additionalProperties: + type: string + description: Output only. The map of port + descriptions to URLs. Will only be populated + if enable_http_port_access is true. + type: object + type: object + type: array + lifecycleConfig: + description: Optional. Lifecycle setting for + the cluster. + items: + properties: + idleStartTime: + description: Output only. The time when + cluster became idle (most recent job + finished) and became eligible for deletion + due to idleness (see JSON representation + of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + type: string + type: object + type: array + masterConfig: + description: Optional. The Compute Engine config + settings for additional worker instances in + a cluster. + items: + properties: + instanceNames: + description: Output only. The list of + instance names. Dataproc derives the + names from cluster_name, num_instances, + and the instance group. + items: + type: string + type: array + isPreemptible: + description: Output only. Specifies that + this instance group contains preemptible + instances. + type: boolean + managedGroupConfig: + description: Output only. The config for + Compute Engine Instance Group Manager + that manages this group. This is only + used for preemptible instance groups. + items: + properties: + instanceGroupManagerName: + description: 'Output only. The resource + name of the workflow template, + as described in https://cloud.google.com/apis/design/resource_names. + * For projects.regions.workflowTemplates, + the resource name of the template + has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} + * For projects.locations.workflowTemplates, + the resource name of the template + has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}' + type: string + instanceTemplateName: + description: 'Output only. The resource + name of the workflow template, + as described in https://cloud.google.com/apis/design/resource_names. + * For projects.regions.workflowTemplates, + the resource name of the template + has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} + * For projects.locations.workflowTemplates, + the resource name of the template + has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}' + type: string + type: object + type: array + type: object + type: array + secondaryWorkerConfig: + description: Optional. The Compute Engine config + settings for additional worker instances in + a cluster. 
+ items: + properties: + instanceNames: + description: Output only. The list of + instance names. Dataproc derives the + names from cluster_name, num_instances, + and the instance group. + items: + type: string + type: array + isPreemptible: + description: Output only. Specifies that + this instance group contains preemptible + instances. + type: boolean + managedGroupConfig: + description: Output only. The config for + Compute Engine Instance Group Manager + that manages this group. This is only + used for preemptible instance groups. + items: + properties: + instanceGroupManagerName: + description: 'Output only. The resource + name of the workflow template, + as described in https://cloud.google.com/apis/design/resource_names. + * For projects.regions.workflowTemplates, + the resource name of the template + has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} + * For projects.locations.workflowTemplates, + the resource name of the template + has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}' + type: string + instanceTemplateName: + description: 'Output only. The resource + name of the workflow template, + as described in https://cloud.google.com/apis/design/resource_names. + * For projects.regions.workflowTemplates, + the resource name of the template + has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} + * For projects.locations.workflowTemplates, + the resource name of the template + has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}' + type: string + type: object + type: array + type: object + type: array + workerConfig: + description: Optional. The Compute Engine config + settings for additional worker instances in + a cluster. + items: + properties: + instanceNames: + description: Output only. The list of + instance names. Dataproc derives the + names from cluster_name, num_instances, + and the instance group. + items: + type: string + type: array + isPreemptible: + description: Output only. Specifies that + this instance group contains preemptible + instances. + type: boolean + managedGroupConfig: + description: Output only. The config for + Compute Engine Instance Group Manager + that manages this group. This is only + used for preemptible instance groups. + items: + properties: + instanceGroupManagerName: + description: 'Output only. The resource + name of the workflow template, + as described in https://cloud.google.com/apis/design/resource_names. + * For projects.regions.workflowTemplates, + the resource name of the template + has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} + * For projects.locations.workflowTemplates, + the resource name of the template + has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}' + type: string + instanceTemplateName: + description: 'Output only. The resource + name of the workflow template, + as described in https://cloud.google.com/apis/design/resource_names. 
+ * For projects.regions.workflowTemplates, + the resource name of the template + has the following format: projects/{project_id}/regions/{region}/workflowTemplates/{template_id} + * For projects.locations.workflowTemplates, + the resource name of the template + has the following format: projects/{project_id}/locations/{location}/workflowTemplates/{template_id}' + type: string + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + type: object + type: array + updateTime: + description: Output only. The time template was last updated. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: []
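
For orientation, a minimal manifest that satisfies the required fields of the schema above (jobs entries with a stepId, a location, and a placement whose managedCluster sets clusterName and config) could look like the sketch below. This is illustrative only and is not one of the example manifests added by this patch: the metadata name, region, query, and worker count are placeholder values, and the machine type simply reuses the n1-standard-2 example from the field descriptions. Note that the schema renders each nested block as an array, so every *Config block is written as a single-element list.

apiVersion: dataproc.gcp.upbound.io/v1beta1
kind: WorkflowTemplate
metadata:
  name: workflowtemplate-sketch        # placeholder name
spec:
  forProvider:
    location: us-central1              # required; a "global" location would also require a zone
    jobs:
      - stepId: spark-sql-step         # required; 3-50 chars of letters, digits, "_" and "-"
        sparkSqlJob:
          - queryList:
              - queries:
                  - "SHOW DATABASES;"  # placeholder query
    placement:
      - managedCluster:
          - clusterName: wt-cluster    # used as a prefix; Dataproc appends a random suffix
            config:
              - masterConfig:
                  - machineType: n1-standard-2
                    numInstances: 1    # master groups must be set to 1 per the schema
                workerConfig:
                  - machineType: n1-standard-2
                    numInstances: 2    # illustrative worker count
  providerConfigRef:
    name: default                      # matches the default shown in the schema

Neither writeConnectionSecretToRef nor publishConnectionDetailsTo is set here, since the schema marks only forProvider as required at the spec level.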