diff --git a/cmd/apiserver/app/options/options.go b/cmd/apiserver/app/options/options.go index 875f43566..d83b9c823 100644 --- a/cmd/apiserver/app/options/options.go +++ b/cmd/apiserver/app/options/options.go @@ -39,7 +39,7 @@ type ClusterPediaServerOptions struct { CoreAPI *genericoptions.CoreAPIOptions FeatureGate featuregate.FeatureGate Admission *genericoptions.AdmissionOptions - // Traces *genericoptions.TracingOptions + Traces *genericoptions.TracingOptions Storage *storageoptions.StorageOptions } @@ -66,7 +66,7 @@ func NewServerOptions() *ClusterPediaServerOptions { CoreAPI: genericoptions.NewCoreAPIOptions(), FeatureGate: feature.DefaultFeatureGate, Admission: genericoptions.NewAdmissionOptions(), - // Traces: genericoptions.NewTracingOptions(), + Traces: genericoptions.NewTracingOptions(), Storage: storageoptions.NewStorageOptions(), } @@ -154,6 +154,9 @@ func (o *ClusterPediaServerOptions) genericOptionsApplyTo(config *genericapiserv if err := o.Admission.ApplyTo(&config.Config, config.SharedInformerFactory, client, dynamicClient, o.FeatureGate); err != nil { return err } + if err := o.Traces.ApplyTo(nil, &config.Config); err != nil { + return err + } return nil } @@ -176,7 +179,7 @@ func (o *ClusterPediaServerOptions) Flags() cliflag.NamedFlagSets { logsapi.AddFlags(o.Logs, fss.FlagSet("logs")) // o.Admission.AddFlags(fss.FlagSet("admission")) - // o.Traces.AddFlags(fss.FlagSet("traces")) + o.Traces.AddFlags(fss.FlagSet("traces")) o.Storage.AddFlags(fss.FlagSet("storage")) return fss diff --git a/deploy/clusterpedia_apiserver_deployment.yaml b/deploy/clusterpedia_apiserver_deployment.yaml index b7eeb72c8..9ef52dfd3 100644 --- a/deploy/clusterpedia_apiserver_deployment.yaml +++ b/deploy/clusterpedia_apiserver_deployment.yaml @@ -41,6 +41,7 @@ spec: - /usr/local/bin/apiserver - --secure-port=443 - --storage-config=/etc/clusterpedia/storage/internalstorage-config.yaml + - --tracing-config-file=/etc/clusterpedia/trace/tracing-config.yaml - -v=3 env: - name: 
DB_PASSWORD @@ -52,8 +53,27 @@ spec: - name: internalstorage-config mountPath: /etc/clusterpedia/storage readOnly: true + - name: tracing-config + mountPath: /etc/clusterpedia/trace + readOnly: true serviceAccountName: clusterpedia-apiserver volumes: - name: internalstorage-config configMap: name: clusterpedia-internalstorage + - name: tracing-config + configMap: + name: clusterpedia-tracing-config +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: clusterpedia-tracing-config + namespace: clusterpedia-system +data: + tracing-config.yaml: | + apiVersion: apiserver.config.k8s.io/v1beta1 + kind: TracingConfiguration + # default + # endpoint: localhost:4317 + samplingRatePerMillion: 1000000 \ No newline at end of file diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 59c2ee60b..88fff05a8 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -124,6 +124,7 @@ func (config completedConfig) New() (*ClusterPediaServer, error) { resourceServerConfig := kubeapiserver.NewDefaultConfig() resourceServerConfig.GenericConfig.ExternalAddress = config.GenericConfig.ExternalAddress resourceServerConfig.GenericConfig.LoopbackClientConfig = config.GenericConfig.LoopbackClientConfig + resourceServerConfig.GenericConfig.TracerProvider = config.GenericConfig.TracerProvider resourceServerConfig.ExtraConfig = kubeapiserver.ExtraConfig{ InformerFactory: clusterpediaInformerFactory, StorageFactory: config.StorageFactory, diff --git a/pkg/kubeapiserver/apiserver.go b/pkg/kubeapiserver/apiserver.go index f4579e1ea..bdb6550fd 100644 --- a/pkg/kubeapiserver/apiserver.go +++ b/pkg/kubeapiserver/apiserver.go @@ -11,9 +11,11 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" genericapifilters "k8s.io/apiserver/pkg/endpoints/filters" genericrequest "k8s.io/apiserver/pkg/endpoints/request" + genericfeatures "k8s.io/apiserver/pkg/features" genericapiserver "k8s.io/apiserver/pkg/server" genericfilters "k8s.io/apiserver/pkg/server/filters" 
"k8s.io/apiserver/pkg/server/healthz" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/restmapper" informers "github.com/clusterpedia-io/clusterpedia/pkg/generated/informers/externalversions" @@ -145,6 +147,10 @@ func BuildHandlerChain(apiHandler http.Handler, c *genericapiserver.Config) http // https://github.com/clusterpedia-io/clusterpedia/issues/54 handler = filters.RemoveFieldSelectorFromRequest(handler) + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerTracing) { + handler = genericapifilters.WithTracing(handler, c.TracerProvider) + } + /* used for debugging handler = genericapifilters.WithWarningRecorder(handler) handler = WithClusterName(handler, "cluster-1") diff --git a/vendor/github.com/clusterpedia-io/api b/vendor/github.com/clusterpedia-io/api deleted file mode 120000 index 7c9f729c5..000000000 --- a/vendor/github.com/clusterpedia-io/api +++ /dev/null @@ -1 +0,0 @@ -../../../staging/src/github.com/clusterpedia-io/api/ \ No newline at end of file diff --git a/vendor/github.com/clusterpedia-io/api/cluster/v1alpha2/doc.go b/vendor/github.com/clusterpedia-io/api/cluster/v1alpha2/doc.go new file mode 100644 index 000000000..e8b67bb3d --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/cluster/v1alpha2/doc.go @@ -0,0 +1,6 @@ +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +groupName=cluster.clusterpedia.io + +// Package v1alpha2 is the v1alpha2 version of the API +package v1alpha2 diff --git a/vendor/github.com/clusterpedia-io/api/cluster/v1alpha2/register.go b/vendor/github.com/clusterpedia-io/api/cluster/v1alpha2/register.go new file mode 100644 index 000000000..c5498b7d2 --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/cluster/v1alpha2/register.go @@ -0,0 +1,51 @@ +package v1alpha2 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName specifies the group name used to register the objects. 
+const GroupName = "cluster.clusterpedia.io" + +// GroupVersion specifies the group and the version used to register the objects. +var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1alpha2"} + +// SchemeGroupVersion is group version used to register these objects +// Deprecated: use GroupVersion instead. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // Depreciated: use Install instead + AddToScheme = localSchemeBuilder.AddToScheme + Install = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PediaCluster{}, + &PediaClusterList{}, + &ClusterSyncResources{}, + &ClusterSyncResourcesList{}, + ) + // AddToGroupVersion allows the serialization of client types like ListOptions. 
+ v1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/clusterpedia-io/api/cluster/v1alpha2/types.go b/vendor/github.com/clusterpedia-io/api/cluster/v1alpha2/types.go new file mode 100644 index 000000000..fe386dedf --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/cluster/v1alpha2/types.go @@ -0,0 +1,249 @@ +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + ValidatedCondition = "Validated" + SynchroRunningCondition = "SynchroRunning" + ClusterHealthyCondition = "ClusterHealthy" + ReadyCondition = "Ready" + + // deprecated + ClusterSynchroInitializedCondition = "ClusterSynchroInitialized" +) + +const ( + InvalidConfigReason = "InvalidConfig" + InvalidSyncResourcesReason = "InvalidSyncResources" + ValidatedReason = "Validated" + + SynchroWaitInitReason = "WaitInit" + SynchroInitialFailedReason = "InitialFailed" + SynchroPendingReason = "Pending" + SynchroRunningReason = "Running" + SynchroShutdownReason = "Shutdown" + + ClusterMonitorStopReason = "MonitorStop" + ClusterHealthyReason = "Healthy" + ClusterUnhealthyReason = "Unhealthy" + ClusterNotReachableReason = "NotReachable" + + ReadyReason = "Ready" + NotReadyReason = "NotReady" +) + +const ( + ResourceSyncStatusPending = "Pending" + ResourceSyncStatusSyncing = "Syncing" + ResourceSyncStatusStop = "Stop" + ResourceSyncStatusUnknown = "Unknown" + ResourceSyncStatusError = "Error" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope="Cluster" +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=".status.conditions[?(@.type == 'Ready')].status" +// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=".status.version" +// +kubebuilder:printcolumn:name="APIServer",type=string,JSONPath=".status.apiserver" 
+// +kubebuilder:printcolumn:name="Validated",type=string,JSONPath=".status.conditions[?(@.type == 'Validated')].reason",priority=10 +// +kubebuilder:printcolumn:name="SynchroRunning",type=string,JSONPath=".status.conditions[?(@.type == 'SynchroRunning')].reason",priority=10 +// +kubebuilder:printcolumn:name="ClusterHealthy",type=string,JSONPath=".status.conditions[?(@.type == 'ClusterHealthy')].reason",priority=10 +type PediaCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec ClusterSpec `json:"spec,omitempty"` + + // +optional + Status ClusterStatus `json:"status,omitempty"` +} + +type ClusterSpec struct { + // +optional + Kubeconfig []byte `json:"kubeconfig,omitempty"` + + // +optional + APIServer string `json:"apiserver,omitempty"` + + // +optional + TokenData []byte `json:"tokenData,omitempty"` + + // +optional + CAData []byte `json:"caData,omitempty"` + + // +optional + CertData []byte `json:"certData,omitempty"` + + // +optional + KeyData []byte `json:"keyData,omitempty"` + + // +required + SyncResources []ClusterGroupResources `json:"syncResources"` + + // +optional + SyncAllCustomResources bool `json:"syncAllCustomResources,omitempty"` + + // +optional + SyncResourcesRefName string `json:"syncResourcesRefName,omitempty"` +} + +type ClusterGroupResources struct { + Group string `json:"group"` + + // +optional + Versions []string `json:"versions,omitempty"` + + // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + Resources []string `json:"resources"` +} + +type ClusterStatus struct { + // +optional + APIServer string `json:"apiserver,omitempty"` + + // +optional + Version string `json:"version,omitempty"` + + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // +optional + SyncResources []ClusterGroupResourcesStatus `json:"syncResources,omitempty"` +} + +type ClusterGroupResourcesStatus struct { + // +required + // 
+kubebuilder:validation:Required + Group string `json:"group"` + + // +required + // +kubebuilder:validation:Required + Resources []ClusterResourceStatus `json:"resources"` +} + +type ClusterResourceStatus struct { + // +required + // +kubebuilder:validation:Required + Name string `json:"name"` + + // +required + // +kubebuilder:validation:Required + Kind string `json:"kind"` + + // +required + // +kubebuilder:validation:Required + Namespaced bool `json:"namespaced"` + + // +required + // +kubebuilder:validation:Required + SyncConditions []ClusterResourceSyncCondition `json:"syncConditions"` +} + +type ClusterResourceSyncCondition struct { + // +required + // +kubebuilder:validation:Required + Version string `json:"version"` + + // optional + SyncVersion string `json:"syncVersion,omitempty"` + + // optional + SyncResource string `json:"syncResource,omitempty"` + + // optional + StorageVersion string `json:"storageVersion,omitempty"` + + // optional + StorageResource string `json:"storageResource,omitempty"` + + // +required + // +kubebuilder:validation:Required + Status string `json:"status"` + + // optional + Reason string `json:"reason,omitempty"` + + // optional + Message string `json:"message,omitempty"` + + // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=date-time + LastTransitionTime metav1.Time `json:"lastTransitionTime"` +} + +func (cond ClusterResourceSyncCondition) SyncGVR(resource schema.GroupResource) schema.GroupVersionResource { + if cond.Version == "" || cond.SyncVersion == "" { + return schema.GroupVersionResource{} + } + + if cond.SyncResource != "" { + resource = schema.ParseGroupResource(cond.SyncResource) + } + if cond.SyncVersion != "" { + return resource.WithVersion(cond.StorageVersion) + } + return resource.WithVersion(cond.Version) +} + +func (cond ClusterResourceSyncCondition) StorageGVR(resource schema.GroupResource) schema.GroupVersionResource { + if 
cond.Version == "" || cond.StorageVersion == "" { + return schema.GroupVersionResource{} + } + + if cond.StorageResource != "" { + return schema.ParseGroupResource(cond.StorageResource).WithVersion(cond.StorageVersion) + } + return resource.WithVersion(cond.StorageVersion) +} + +// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type PediaClusterList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []PediaCluster `json:"items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:scope="Cluster" +type ClusterSyncResources struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec ClusterSyncResourcesSpec `json:"spec,omitempty"` +} + +type ClusterSyncResourcesSpec struct { + // +required + SyncResources []ClusterGroupResources `json:"syncResources"` +} + +// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ClusterSyncResourcesList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []ClusterSyncResources `json:"items"` +} diff --git a/vendor/github.com/clusterpedia-io/api/cluster/v1alpha2/zz_generated.deepcopy.go b/vendor/github.com/clusterpedia-io/api/cluster/v1alpha2/zz_generated.deepcopy.go new file mode 100644 index 000000000..b2183dbca --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/cluster/v1alpha2/zz_generated.deepcopy.go @@ -0,0 +1,322 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package v1alpha2 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterGroupResources) DeepCopyInto(out *ClusterGroupResources) { + *out = *in + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterGroupResources. +func (in *ClusterGroupResources) DeepCopy() *ClusterGroupResources { + if in == nil { + return nil + } + out := new(ClusterGroupResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterGroupResourcesStatus) DeepCopyInto(out *ClusterGroupResourcesStatus) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ClusterResourceStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterGroupResourcesStatus. +func (in *ClusterGroupResourcesStatus) DeepCopy() *ClusterGroupResourcesStatus { + if in == nil { + return nil + } + out := new(ClusterGroupResourcesStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterResourceStatus) DeepCopyInto(out *ClusterResourceStatus) { + *out = *in + if in.SyncConditions != nil { + in, out := &in.SyncConditions, &out.SyncConditions + *out = make([]ClusterResourceSyncCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceStatus. +func (in *ClusterResourceStatus) DeepCopy() *ClusterResourceStatus { + if in == nil { + return nil + } + out := new(ClusterResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceSyncCondition) DeepCopyInto(out *ClusterResourceSyncCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceSyncCondition. +func (in *ClusterResourceSyncCondition) DeepCopy() *ClusterResourceSyncCondition { + if in == nil { + return nil + } + out := new(ClusterResourceSyncCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + if in.Kubeconfig != nil { + in, out := &in.Kubeconfig, &out.Kubeconfig + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.TokenData != nil { + in, out := &in.TokenData, &out.TokenData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.CAData != nil { + in, out := &in.CAData, &out.CAData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.CertData != nil { + in, out := &in.CertData, &out.CertData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.KeyData != nil { + in, out := &in.KeyData, &out.KeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.SyncResources != nil { + in, out := &in.SyncResources, &out.SyncResources + *out = make([]ClusterGroupResources, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SyncResources != nil { + in, out := &in.SyncResources, &out.SyncResources + *out = make([]ClusterGroupResourcesStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. 
+func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSyncResources) DeepCopyInto(out *ClusterSyncResources) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncResources. +func (in *ClusterSyncResources) DeepCopy() *ClusterSyncResources { + if in == nil { + return nil + } + out := new(ClusterSyncResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterSyncResources) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSyncResourcesList) DeepCopyInto(out *ClusterSyncResourcesList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterSyncResources, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncResourcesList. +func (in *ClusterSyncResourcesList) DeepCopy() *ClusterSyncResourcesList { + if in == nil { + return nil + } + out := new(ClusterSyncResourcesList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterSyncResourcesList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSyncResourcesSpec) DeepCopyInto(out *ClusterSyncResourcesSpec) { + *out = *in + if in.SyncResources != nil { + in, out := &in.SyncResources, &out.SyncResources + *out = make([]ClusterGroupResources, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncResourcesSpec. +func (in *ClusterSyncResourcesSpec) DeepCopy() *ClusterSyncResourcesSpec { + if in == nil { + return nil + } + out := new(ClusterSyncResourcesSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PediaCluster) DeepCopyInto(out *PediaCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PediaCluster. +func (in *PediaCluster) DeepCopy() *PediaCluster { + if in == nil { + return nil + } + out := new(PediaCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PediaCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PediaClusterList) DeepCopyInto(out *PediaClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PediaCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PediaClusterList. +func (in *PediaClusterList) DeepCopy() *PediaClusterList { + if in == nil { + return nil + } + out := new(PediaClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PediaClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/clusterpedia-io/api/clusterpedia/doc.go b/vendor/github.com/clusterpedia-io/api/clusterpedia/doc.go new file mode 100644 index 000000000..eb37a35a2 --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/clusterpedia/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +groupName=clusterpedia.io +package clusterpedia diff --git a/vendor/github.com/clusterpedia-io/api/clusterpedia/fields/lexer.go b/vendor/github.com/clusterpedia-io/api/clusterpedia/fields/lexer.go new file mode 100644 index 000000000..ae85694ca --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/clusterpedia/fields/lexer.go @@ -0,0 +1,150 @@ +/* + Copy from + https://github.com/kubernetes/kubernetes/blob/f5be5052e3d0808abb904aebd3218fe4a5c2dd82/staging/src/k8s.io/apimachinery/pkg/labels/selector.go#L452-L588 +*/ + +package fields + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/labels" +) + +// string2token contains the mapping between lexer Token and token literal +// (except IdentifierToken, EndOfStringToken and ErrorToken since it makes no sense) +var string2token = map[string]labels.Token{ + ")": 
labels.ClosedParToken, + ",": labels.CommaToken, + "!": labels.DoesNotExistToken, + "==": labels.DoubleEqualsToken, + "=": labels.EqualsToken, + ">": labels.GreaterThanToken, + "in": labels.InToken, + "<": labels.LessThanToken, + "!=": labels.NotEqualsToken, + "notin": labels.NotInToken, + "(": labels.OpenParToken, +} + +// ScannedItem contains the Token and the literal produced by the lexer. +type ScannedItem struct { + tok labels.Token + literal string +} + +// isWhitespace returns true if the rune is a space, tab, or newline. +func isWhitespace(ch byte) bool { + return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' +} + +// isSpecialSymbol detects if the character ch can be an operator +func isSpecialSymbol(ch byte) bool { + switch ch { + case '=', '!', '(', ')', ',', '>', '<': + return true + } + return false +} + +// Lexer represents the Lexer struct for label selector. +// It contains necessary informationt to tokenize the input string +type Lexer struct { + // s stores the string to be tokenized + s string + // pos is the position currently tokenized + pos int +} + +// read returns the character currently lexed +// increment the position and check the buffer overflow +func (l *Lexer) read() (b byte) { + b = 0 + if l.pos < len(l.s) { + b = l.s[l.pos] + l.pos++ + } + return b +} + +// unread 'undoes' the last read character +func (l *Lexer) unread() { + l.pos-- +} + +// scanIDOrKeyword scans string to recognize literal token (for example 'in') or an identifier. +func (l *Lexer) scanIDOrKeyword() (tok labels.Token, lit string) { + var buffer []byte +IdentifierLoop: + for { + switch ch := l.read(); { + case ch == 0: + break IdentifierLoop + case isSpecialSymbol(ch) || isWhitespace(ch): + l.unread() + break IdentifierLoop + default: + buffer = append(buffer, ch) + } + } + s := string(buffer) + if val, ok := string2token[s]; ok { // is a literal token? 
+ return val, s + } + return labels.IdentifierToken, s // otherwise is an identifier +} + +// scanSpecialSymbol scans string starting with special symbol. +// special symbol identify non literal operators. "!=", "==", "=" +func (l *Lexer) scanSpecialSymbol() (labels.Token, string) { + lastScannedItem := ScannedItem{} + var buffer []byte +SpecialSymbolLoop: + for { + switch ch := l.read(); { + case ch == 0: + break SpecialSymbolLoop + case isSpecialSymbol(ch): + buffer = append(buffer, ch) + if token, ok := string2token[string(buffer)]; ok { + lastScannedItem = ScannedItem{tok: token, literal: string(buffer)} + } else if lastScannedItem.tok != 0 { + l.unread() + break SpecialSymbolLoop + } + default: + l.unread() + break SpecialSymbolLoop + } + } + if lastScannedItem.tok == 0 { + return labels.ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer) + } + return lastScannedItem.tok, lastScannedItem.literal +} + +// skipWhiteSpaces consumes all blank characters +// returning the first non blank character +func (l *Lexer) skipWhiteSpaces(ch byte) byte { + for { + if !isWhitespace(ch) { + return ch + } + ch = l.read() + } +} + +// Lex returns a pair of Token and the literal +// literal is meaningfull only for IdentifierToken token +func (l *Lexer) Lex() (tok labels.Token, lit string) { + switch ch := l.skipWhiteSpaces(l.read()); { + case ch == 0: + return labels.EndOfStringToken, "" + case isSpecialSymbol(ch): + l.unread() + return l.scanSpecialSymbol() + default: + l.unread() + return l.scanIDOrKeyword() + } +} diff --git a/vendor/github.com/clusterpedia-io/api/clusterpedia/fields/parser.go b/vendor/github.com/clusterpedia-io/api/clusterpedia/fields/parser.go new file mode 100644 index 000000000..9a4ca004e --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/clusterpedia/fields/parser.go @@ -0,0 +1,270 @@ +/* + Reference from + 
https://github.com/kubernetes/kubernetes/blob/f5be5052e3d0808abb904aebd3218fe4a5c2dd82/staging/src/k8s.io/apimachinery/pkg/labels/selector.go#L590 + +*/ + +package fields + +import ( + "fmt" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" +) + +var ( + unaryOperators = []string{ + string(selection.Exists), string(selection.DoesNotExist), + } + binaryOperators = []string{ + string(selection.In), string(selection.NotIn), + string(selection.Equals), string(selection.DoubleEquals), string(selection.NotEquals), + string(selection.GreaterThan), string(selection.LessThan), + } + validRequirementOperators = append(binaryOperators, unaryOperators...) +) + +type Parser struct { + l *Lexer + scannedItems []ScannedItem + position int +} + +// ParserContext represents context during parsing: +// some literal for example 'in' and 'notin' can be +// recognized as operator for example 'x in (a)' but +// it can be recognized as value for example 'value in (in)' +type ParserContext int + +const ( + // KeyAndOperator represents key and operator + KeyAndOperator ParserContext = iota + // Values represents values + Values +) + +func (p *Parser) lookahead(context ParserContext) (labels.Token, string) { + tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal + if context == Values { + switch tok { + case labels.InToken, labels.NotInToken: + tok = labels.IdentifierToken + } + } + return tok, lit +} + +func (p *Parser) consume(context ParserContext) (labels.Token, string) { + p.position++ + tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal + if context == Values { + switch tok { + case labels.InToken, labels.NotInToken: + tok = labels.IdentifierToken + } + } + return tok, lit +} + +func (p *Parser) scan() { + for { + token, literal := p.l.Lex() + p.scannedItems = append(p.scannedItems, ScannedItem{token, literal}) + if token == 
labels.EndOfStringToken { + break + } + } +} + +func (p *Parser) parse() ([]Requirement, error) { + p.scan() + + var requirements []Requirement + for { + tok, lit := p.lookahead(Values) + switch tok { + case labels.IdentifierToken, labels.DoesNotExistToken: + r, err := p.parseRequirement() + if err != nil { + return nil, fmt.Errorf("unable to parse requirement: %v", err) + } + requirements = append(requirements, *r) + t, l := p.consume(Values) + switch t { + case labels.EndOfStringToken: + return requirements, nil + case labels.CommaToken: + t2, l2 := p.lookahead(Values) + if t2 != labels.IdentifierToken && t2 != labels.DoesNotExistToken { + return nil, fmt.Errorf("found %q, expected: identifier after ','", l2) + } + default: + return nil, fmt.Errorf("found %q, expected: ',' or 'end of string'", l) + } + case labels.EndOfStringToken: + return requirements, nil + default: + return nil, fmt.Errorf("found %q, expected: !, identifier, or ''end of string", lit) + } + } +} + +func (p *Parser) parseRequirement() (*Requirement, error) { + key, operator, err := p.parseKeyAndInferOperator() + if err != nil { + return nil, err + } + if operator == selection.Exists || operator == selection.DoesNotExist { + return NewRequirement(key, operator, []string{}) + } + + operator, err = p.parseOperator() + if err != nil { + return nil, err + } + + var values sets.String + switch operator { + case selection.In, selection.NotIn: + values, err = p.parseValues() + case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan: + values, err = p.parseExactValue() + } + if err != nil { + return nil, err + } + return NewRequirement(key, operator, values.List()) +} + +func (p *Parser) parseKeyAndInferOperator() (string, selection.Operator, error) { + var operator selection.Operator + tok, literal := p.consume(Values) + if tok == labels.DoesNotExistToken { + operator = selection.DoesNotExist + tok, literal = p.consume(Values) + } + if tok != 
labels.IdentifierToken { + return "", "", fmt.Errorf("found %q, expected: identifier", literal) + } + + if t, _ := p.lookahead(Values); t == labels.EndOfStringToken || t == labels.CommaToken { + if operator != selection.DoesNotExist { + operator = selection.Exists + } + } + return literal, operator, nil +} + +func (p *Parser) parseOperator() (op selection.Operator, err error) { + tok, lit := p.consume(KeyAndOperator) + switch tok { + // DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator + case labels.InToken: + op = selection.In + case labels.EqualsToken: + op = selection.Equals + case labels.DoubleEqualsToken: + op = selection.DoubleEquals + case labels.GreaterThanToken: + op = selection.GreaterThan + case labels.LessThanToken: + op = selection.LessThan + case labels.NotInToken: + op = selection.NotIn + case labels.NotEqualsToken: + op = selection.NotEquals + default: + return "", fmt.Errorf("found '%s', expected: %v", lit, strings.Join(binaryOperators, ", ")) + } + return op, nil +} + +func (p *Parser) parseValues() (sets.String, error) { + tok, lit := p.consume(Values) + if tok != labels.OpenParToken { + return nil, fmt.Errorf("found %q, expcted:'('", lit) + } + + tok, lit = p.lookahead(Values) + switch tok { + case labels.IdentifierToken, labels.CommaToken: + s, err := p.parseIdentifiersList() + if err != nil { + return s, err + } + if tok, _ = p.consume(Values); tok != labels.ClosedParToken { + return nil, fmt.Errorf("found '%s', expectedd: ')'", lit) + } + return s, nil + case labels.ClosedParToken: + p.consume(Values) + return sets.NewString(""), nil + default: + return nil, fmt.Errorf("found %q, expected: ',', ')' or identifier", lit) + } +} + +func (p *Parser) parseIdentifiersList() (sets.String, error) { + s := sets.NewString() + for { + tok, lit := p.consume(Values) + switch tok { + case labels.IdentifierToken: + s.Insert(lit) + tok2, lit2 := p.lookahead(Values) + switch tok2 { + case labels.CommaToken: + continue + 
case labels.ClosedParToken: + return s, nil + default: + return nil, fmt.Errorf("found %q, expected: ',' or ')'", lit2) + } + case labels.CommaToken: + if s.Len() == 0 { + s.Insert("") // to handle '(,' + } + tok2, _ := p.lookahead(Values) + if tok2 == labels.ClosedParToken { + s.Insert("") // to handle ',)' Double "" removed by StringSet + return s, nil + } + if tok2 == labels.CommaToken { + p.consume(Values) + s.Insert("") // to handle ,, Double "" removed by StringSet + } + default: + return s, fmt.Errorf("found %q, expected: ',',or identifier", lit) + } + } +} + +func (p *Parser) parseExactValue() (sets.String, error) { + s := sets.NewString() + tok, _ := p.lookahead(Values) + if tok == labels.EndOfStringToken || tok == labels.CommaToken { + s.Insert("") + return s, nil + } + tok, lit := p.consume(Values) + if tok != labels.IdentifierToken { + return nil, fmt.Errorf("found %q, expected: identifier", lit) + } + s.Insert(lit) + return s, nil +} + +// safeSort sorts input strings without modification +func safeSort(in []string) []string { + if sort.StringsAreSorted(in) { + return in + } + out := make([]string, len(in)) + copy(out, in) + sort.Strings(out) + return out +} diff --git a/vendor/github.com/clusterpedia-io/api/clusterpedia/fields/selector.go b/vendor/github.com/clusterpedia-io/api/clusterpedia/fields/selector.go new file mode 100644 index 000000000..eb0f1917d --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/clusterpedia/fields/selector.go @@ -0,0 +1,363 @@ +package fields + +import ( + "errors" + "fmt" + "sort" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +type Requirements []Requirement + +// Selector represents a label selector +type Selector interface { + Empty() bool + + String() string + + // Add adds requirements to the Selector + Add(r ...Requirement) Selector + + Requirements() 
(requirements Requirements, selectable bool) + + // Make a deep copy of the selector. + DeepCopySelector() Selector +} + +type internalSelector []Requirement + +func (s internalSelector) Empty() bool { return len(s) == 0 } + +func (s internalSelector) Requirements() (Requirements, bool) { return Requirements(s), true } + +func (s internalSelector) DeepCopy() internalSelector { + if s == nil { + return nil + } + + // The `field.Path` struct is included in the `Field`, + // so DeepCopy becomes more complicated, and using `Parse` + // simplifies the logic. + result, _ := Parse(s.String()) + return result.(internalSelector) +} + +func (s internalSelector) DeepCopySelector() Selector { + return s.DeepCopy() +} + +func (s internalSelector) Add(reqs ...Requirement) Selector { + ret := make(internalSelector, 0, len(s)+len(reqs)) + ret = append(ret, s...) + ret = append(ret, reqs...) + sort.Sort(ByKey(ret)) + return ret +} + +func (s internalSelector) String() string { + var reqs []string + for ix := range s { + reqs = append(reqs, s[ix].String()) + } + return strings.Join(reqs, ",") +} + +// ByKey sorts requirements by key to obtain deterministic parser +type ByKey []Requirement + +func (rs ByKey) Len() int { return len(rs) } + +func (rs ByKey) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] } + +func (rs ByKey) Less(i, j int) bool { return rs[i].key < rs[j].key } + +type Requirement struct { + key string + + fields []Field + operator selection.Operator + strValues []string +} + +func NewRequirement(key string, op selection.Operator, vals []string) (*Requirement, error) { + fields, err := parseFields(key, nil) + if err != nil { + return nil, err + } + if len(fields) == 0 { + return nil, errors.New("fields is empty") + } + + var allErrs field.ErrorList + for _, field := range fields { + if err := field.Validate(); err != nil { + allErrs = append(allErrs, err) + } + } + + lastField := fields[len(fields)-1] + path := lastField.Path() + if lastField.IsList() { + allErrs = 
append(allErrs, field.Invalid(path, lastField.Name(), "last field could not be list")) + } + + valuePath := path.Child("values") + switch op { + case selection.In, selection.NotIn: + if len(vals) == 0 { + allErrs = append(allErrs, field.Invalid(valuePath, vals, "for 'in', 'notin' operators, values set can't be empty")) + } + case selection.Equals, selection.DoubleEquals, selection.NotEquals: + if len(vals) != 1 { + allErrs = append(allErrs, field.Invalid(valuePath, vals, "exact-match compatibility requires one single value")) + } + case selection.Exists, selection.DoesNotExist: + if len(vals) != 0 { + allErrs = append(allErrs, field.Invalid(valuePath, vals, "values set must be empty for exists and does not exist")) + } + case selection.GreaterThan, selection.LessThan: + if len(vals) != 1 { + allErrs = append(allErrs, field.Invalid(valuePath, vals, "for 'Gt', 'Lt' operators, exactly one value is required")) + } + for i := range vals { + if _, err := strconv.ParseInt(vals[i], 10, 64); err != nil { + allErrs = append(allErrs, field.Invalid(valuePath.Index(i), vals[i], "for 'Gt', 'Lt' operators, the value must be an integer")) + } + } + default: + allErrs = append(allErrs, field.NotSupported(path.Child("operator"), op, validRequirementOperators)) + } + + // values not validate + + return &Requirement{key: key, fields: fields, operator: op, strValues: vals}, allErrs.ToAggregate() +} + +func (r *Requirement) Fields() []Field { + fields := make([]Field, len(r.fields)) + copy(fields, r.fields) + return fields +} + +func (r *Requirement) Operator() selection.Operator { + return r.operator +} + +func (r *Requirement) Values() sets.String { + ret := sets.String{} + for i := range r.strValues { + ret.Insert(r.strValues[i]) + } + return ret +} + +func (r *Requirement) String() string { + var sb strings.Builder + sb.Grow( + // length of r.key + len(r.key) + + // length of 'r.operator' + 2 spaces for the worst case ('in' and 'notin') + len(r.operator) + 2 + + // length of 
'r.strValues' slice times. Heuristically 5 chars per word + +5*len(r.strValues)) + if r.operator == selection.DoesNotExist { + sb.WriteString("!") + } + sb.WriteString(r.key) + + switch r.operator { + case selection.Equals: + sb.WriteString("=") + case selection.DoubleEquals: + sb.WriteString("==") + case selection.NotEquals: + sb.WriteString("!=") + case selection.In: + sb.WriteString(" in ") + case selection.NotIn: + sb.WriteString(" notin ") + case selection.GreaterThan: + sb.WriteString(">") + case selection.LessThan: + sb.WriteString("<") + case selection.Exists, selection.DoesNotExist: + return sb.String() + } + + switch r.operator { + case selection.In, selection.NotIn: + sb.WriteString("(") + } + if len(r.strValues) == 1 { + sb.WriteString(r.strValues[0]) + } else { // only > 1 since == 0 prohibited by NewRequirement + // normalizes value order on output, without mutating the in-memory selector representation + // also avoids normalization when it is not required, and ensures we do not mutate shared data + sb.WriteString(strings.Join(safeSort(r.strValues), ",")) + } + + switch r.operator { + case selection.In, selection.NotIn: + sb.WriteString(")") + } + return sb.String() +} + +type Field struct { + path *field.Path + + name string + isList bool + index int +} + +func NewField(parentPath *field.Path, name string) Field { + var path *field.Path + if parentPath == nil { + path = field.NewPath(name) + } else { + path = parentPath.Child(name) + } + + return Field{path: path, name: name} +} + +func (f *Field) setListIndex(index int) { + f.path = f.path.Index(index) + f.isList = true + f.index = index +} + +func (f *Field) Name() string { + return f.name +} + +func (f *Field) IsList() bool { + return f.isList +} + +func (f *Field) GetListIndex() (int, bool) { + return f.index, f.isList +} + +func (f *Field) Path() *field.Path { + return f.path +} + +func (f *Field) Validate() *field.Error { + if errs := validation.IsQualifiedName(f.name); len(errs) != 0 { + 
return field.Invalid(f.path, f.name, strings.Join(errs, "; ")) + } + return nil +} + +func parseFields(key string, fields []Field) ([]Field, error) { + if len(key) == 0 { + return fields, nil + } + + if key[0] == '.' { + if len(key) == 1 { + return nil, errors.New("empty field after '.'") + } + + key = key[1:] + } + + var parentPath *field.Path + if len(fields) != 0 { + parentPath = fields[len(fields)-1].Path() + } + + if key[0] == '[' { + rightIndex := strings.IndexByte(key, ']') + switch { + case rightIndex == -1: + return nil, errors.New("not found ']'") + + // handle 'field[]' + case rightIndex == 1: + if len(fields) == 0 { + return nil, errors.New("empty [], not found list field") + } + fields[len(fields)-1].isList = true + + // handle `lastfield['field']` + case key[1] == '\'' || key[1] == '"': + inSquKey := key[1:rightIndex] + + wrap, inSquKey := inSquKey[0], inSquKey[1:] + rightSquIndex := strings.IndexByte(inSquKey, wrap) + switch rightSquIndex { + case -1: + return nil, fmt.Errorf("not found right '%c'", wrap) + case 1: + return nil, fmt.Errorf("empty field %c%c", wrap, wrap) + case len(inSquKey) - 1: + fields = append(fields, NewField(parentPath, inSquKey[0:rightSquIndex])) + default: + return nil, fmt.Errorf("invalid field ['%s]", inSquKey) + } + + // handle 'field[0]' + default: + if len(fields) == 0 { + return nil, errors.New("[], not found list field") + } + lastField := &fields[len(fields)-1] + + indexStr := key[1:rightIndex] + index, err := strconv.Atoi(indexStr) + if err != nil { + return nil, fmt.Errorf("%s[] list index invalid. 
if %s is a field, please use ['%s'] or .'%s'", lastField.Path(), indexStr, indexStr, indexStr) + } + + lastField.setListIndex(index) + } + return parseFields(key[rightIndex+1:], fields) + } + + if key[0] == '\'' || key[0] == '"' { + wrap := key[0] + if len(key) == 1 { + return nil, fmt.Errorf("not found right '%c'", wrap) + } + + key = key[1:] + rightIndex := strings.IndexByte(key, wrap) + if rightIndex == -1 { + return nil, fmt.Errorf("not found right '%c'", wrap) + } + if rightIndex == 1 { + return nil, fmt.Errorf("empty field %c%c", wrap, wrap) + } + + fields = append(fields, NewField(parentPath, key[0:rightIndex])) + return parseFields(key[rightIndex+1:], fields) + } + + rightIndex := strings.IndexAny(key, ".[") + if rightIndex == -1 { + fields = append(fields, NewField(parentPath, key)) + return fields, nil + } + + fields = append(fields, NewField(parentPath, key[:rightIndex])) + return parseFields(key[rightIndex:], fields) +} + +func Parse(selector string) (Selector, error) { + p := &Parser{l: &Lexer{s: selector, pos: 0}} + items, err := p.parse() + if err != nil { + return nil, err + } + sort.Sort(ByKey(items)) + return internalSelector(items), nil +} diff --git a/vendor/github.com/clusterpedia-io/api/clusterpedia/install/install.go b/vendor/github.com/clusterpedia-io/api/clusterpedia/install/install.go new file mode 100644 index 000000000..3984e87a1 --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/clusterpedia/install/install.go @@ -0,0 +1,14 @@ +package install + +import ( + internal "github.com/clusterpedia-io/api/clusterpedia" + "github.com/clusterpedia-io/api/clusterpedia/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +func Install(scheme *runtime.Scheme) { + utilruntime.Must(internal.Install(scheme)) + utilruntime.Must(v1beta1.Install(scheme)) + utilruntime.Must(scheme.SetVersionPriority(v1beta1.SchemeGroupVersion)) +} diff --git 
a/vendor/github.com/clusterpedia-io/api/clusterpedia/register.go b/vendor/github.com/clusterpedia-io/api/clusterpedia/register.go new file mode 100644 index 000000000..13e793085 --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/clusterpedia/register.go @@ -0,0 +1,29 @@ +package clusterpedia + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "clusterpedia.io" + +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +var ( + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + Install = localSchemeBuilder.AddToScheme +) + +func init() { + localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ListOptions{}, + &CollectionResource{}, + &CollectionResourceList{}, + ) + return nil +} diff --git a/vendor/github.com/clusterpedia-io/api/clusterpedia/scheme/register.go b/vendor/github.com/clusterpedia-io/api/clusterpedia/scheme/register.go new file mode 100644 index 000000000..10b8254cb --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/clusterpedia/scheme/register.go @@ -0,0 +1,17 @@ +package scheme + +import ( + "github.com/clusterpedia-io/api/clusterpedia/install" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var scheme = runtime.NewScheme() + +var Codecs = serializer.NewCodecFactory(scheme) + +var ParameterCodec = runtime.NewParameterCodec(scheme) + +func init() { + install.Install(scheme) +} diff --git a/vendor/github.com/clusterpedia-io/api/clusterpedia/types.go b/vendor/github.com/clusterpedia-io/api/clusterpedia/types.go new file mode 100644 index 000000000..35ed197c6 --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/clusterpedia/types.go @@ -0,0 +1,111 @@ +package clusterpedia + +import ( + "net/url" + + metainternal "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/clusterpedia-io/api/clusterpedia/fields" +) + +const ( + SearchLabelNames = "search.clusterpedia.io/names" + SearchLabelClusters = "search.clusterpedia.io/clusters" + SearchLabelNamespaces = "search.clusterpedia.io/namespaces" + SearchLabelOrderBy = "search.clusterpedia.io/orderby" + + SearchLabelOwnerUID = "search.clusterpedia.io/owner-uid" + SearchLabelOwnerName = "search.clusterpedia.io/owner-name" + SearchLabelOwnerGroupResource = "search.clusterpedia.io/owner-gr" + SearchLabelOwnerSeniority = "search.clusterpedia.io/owner-seniority" + + SearchLabelWithContinue = "search.clusterpedia.io/with-continue" + SearchLabelWithRemainingCount = "search.clusterpedia.io/with-remaining-count" + + SearchLabelLimit = "search.clusterpedia.io/limit" + SearchLabelOffset = "search.clusterpedia.io/offset" + + SearchLabelSince = "search.clusterpedia.io/since" + SearchLabelBefore = "search.clusterpedia.io/before" + + ShadowAnnotationClusterName = "shadow.clusterpedia.io/cluster-name" + ShadowAnnotationGroupVersionResource = "shadow.clusterpedia.io/gvr" +) + +type OrderBy struct { + Field string + Desc bool +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true +type ListOptions struct { + metainternal.ListOptions + + Names []string + ClusterNames []string + Namespaces []string + OrderBy []OrderBy + + OwnerName string + OwnerUID string + OwnerGroupResource schema.GroupResource + OwnerSeniority int + + Since *metav1.Time + Before *metav1.Time + + WithContinue *bool + WithRemainingCount *bool + + // +k8s:conversion-fn:drop + EnhancedFieldSelector fields.Selector + + // +k8s:conversion-fn:drop + ExtraLabelSelector labels.Selector + + // +k8s:conversion-fn:drop + URLQuery url.Values + + // RelatedResources []schema.GroupVersionKind + + OnlyMetadata bool +} + +// 
+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CollectionResource struct { + metav1.TypeMeta + metav1.ObjectMeta + + ResourceTypes []CollectionResourceType + Items []runtime.Object + + Continue string + RemainingItemCount *int64 +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type CollectionResourceList struct { + metav1.TypeMeta + metav1.ListMeta + + Items []CollectionResource +} + +type CollectionResourceType struct { + Group string + Version string + Kind string + Resource string +} + +func (t CollectionResourceType) GroupResource() schema.GroupResource { + return schema.GroupResource{ + Group: t.Group, + Resource: t.Resource, + } +} diff --git a/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/conversion.go b/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/conversion.go new file mode 100644 index 000000000..4d3f6d84a --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/conversion.go @@ -0,0 +1,376 @@ +package v1beta1 + +import ( + "errors" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + metainternal "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/clusterpedia-io/api/clusterpedia" + "github.com/clusterpedia-io/api/clusterpedia/fields" +) + +func Convert_v1beta1_ListOptions_To_clusterpedia_ListOptions(in *ListOptions, out *clusterpedia.ListOptions, s conversion.Scope) error { + fieldSelector := in.FieldSelector + defer func() { + in.FieldSelector = fieldSelector + }() + + // skip convert fieldSelector + in.FieldSelector = "" + if err := metainternal.Convert_v1_ListOptions_To_internalversion_ListOptions(&in.ListOptions, &out.ListOptions, s); err != nil { + return err + } + + if err := convert_string_To_fields_Selector(&fieldSelector, 
&out.EnhancedFieldSelector, s); err != nil { + return err + } + + if err := convert_String_To_Slice_string(&in.Names, &out.Names, s); err != nil { + return err + } + if err := convert_String_To_Slice_string(&in.ClusterNames, &out.ClusterNames, s); err != nil { + return err + } + if err := convert_String_To_Slice_string(&in.Namespaces, &out.Namespaces, s); err != nil { + return err + } + + var orderbys []string + if err := convert_String_To_Slice_string(&in.OrderBy, &orderbys, s); err != nil { + return err + } + if err := convert_Slice_string_To_clusterpedia_Slice_orderby(&orderbys, &out.OrderBy, " ", s); err != nil { + return err + } + + out.OwnerUID = in.OwnerUID + out.OwnerName = in.OwnerName + if in.OwnerGroupResource != "" { + out.OwnerGroupResource = schema.ParseGroupResource(in.OwnerGroupResource) + } + out.OwnerSeniority = in.OwnerSeniority + + if err := convert_String_To_Pointer_metav1_Time(&in.Since, &out.Since, nil); err != nil { + return err + } + + if err := convert_String_To_Pointer_metav1_Time(&in.Before, &out.Before, nil); err != nil { + return err + } + + out.WithContinue = in.WithContinue + out.WithRemainingCount = in.WithRemainingCount + + if out.LabelSelector != nil { + var ( + labelRequest []labels.Requirement + extraLabelRequest []labels.Requirement + ) + + if requirements, selectable := out.LabelSelector.Requirements(); selectable { + for _, require := range requirements { + values := require.Values().UnsortedList() + switch require.Key() { + case clusterpedia.SearchLabelNames: + if len(out.Names) == 0 && len(values) != 0 { + out.Names = values + } + case clusterpedia.SearchLabelClusters: + if len(out.ClusterNames) == 0 && len(values) != 0 { + out.ClusterNames = values + } + case clusterpedia.SearchLabelNamespaces: + if len(out.Namespaces) == 0 && len(values) != 0 { + out.Namespaces = values + } + case clusterpedia.SearchLabelOwnerUID: + if out.OwnerUID == "" && len(values) == 1 { + out.OwnerUID = values[0] + } + case 
clusterpedia.SearchLabelOwnerName: + if out.OwnerName == "" && len(values) == 1 { + out.OwnerName = values[0] + } + case clusterpedia.SearchLabelOwnerGroupResource: + if out.OwnerGroupResource.Empty() && len(values) == 1 { + out.OwnerGroupResource = schema.ParseGroupResource(values[0]) + } + case clusterpedia.SearchLabelOwnerSeniority: + if out.OwnerSeniority == 0 && len(values) == 1 { + seniority, err := strconv.Atoi(values[0]) + if err != nil { + return fmt.Errorf("Invalid Query OwnerSeniority(%s): %w", values[0], err) + } + out.OwnerSeniority = seniority + } + case clusterpedia.SearchLabelSince: + if out.Since == nil && len(values) == 1 { + if err := convert_String_To_Pointer_metav1_Time(&values[0], &out.Since, nil); err != nil { + return fmt.Errorf("Invalid Query Since(%s): %w", values[0], err) + } + } + case clusterpedia.SearchLabelBefore: + if out.Before == nil && len(values) == 1 { + if err := convert_String_To_Pointer_metav1_Time(&values[0], &out.Before, nil); err != nil { + return fmt.Errorf("Invalid Query Before(%s): %w", values[0], err) + } + } + case clusterpedia.SearchLabelOrderBy: + if len(out.OrderBy) == 0 && len(values) != 0 { + if err := convert_Slice_string_To_clusterpedia_Slice_orderby(&values, &out.OrderBy, "_", s); err != nil { + return err + } + } + case clusterpedia.SearchLabelLimit: + if out.Limit == 0 && len(values) != 0 { + limit, err := strconv.ParseInt(values[0], 10, 64) + if err != nil { + return fmt.Errorf("Invalid Query Limit: %w", err) + } + out.Limit = limit + } + case clusterpedia.SearchLabelOffset: + if out.Continue == "" && len(values) != 0 { + out.Continue = values[0] + } + + if out.Continue != "" { + _, err := strconv.ParseInt(out.Continue, 10, 64) + if err != nil { + return fmt.Errorf("Invalid Query Offset(%s): %w", out.Continue, err) + } + } + case clusterpedia.SearchLabelWithContinue: + if in.WithContinue == nil && len(values) != 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.WithContinue, s); err 
!= nil { + return err + } + } + case clusterpedia.SearchLabelWithRemainingCount: + if in.WithRemainingCount == nil && len(values) != 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.WithRemainingCount, s); err != nil { + return err + } + } + default: + if strings.Contains(require.Key(), "clusterpedia.io") { + extraLabelRequest = append(extraLabelRequest, require) + } else { + labelRequest = append(labelRequest, require) + } + } + } + } + + out.LabelSelector = nil + if len(labelRequest) != 0 { + out.LabelSelector = labels.NewSelector().Add(labelRequest...) + } + if len(extraLabelRequest) != 0 { + out.ExtraLabelSelector = labels.NewSelector().Add(extraLabelRequest...) + } + } + if out.Before.Before(out.Since) { + return fmt.Errorf("Invalid Query, Since is after Before") + } + if len(in.urlQuery) > 0 { + // Out URLQuery will not be modified, so deepcopy is not used here. + out.URLQuery = in.urlQuery + } + + out.OnlyMetadata = in.OnlyMetadata + return nil +} + +func Convert_clusterpedia_ListOptions_To_v1beta1_ListOptions(in *clusterpedia.ListOptions, out *ListOptions, s conversion.Scope) error { + if err := metainternal.Convert_internalversion_ListOptions_To_v1_ListOptions(&in.ListOptions, &out.ListOptions, s); err != nil { + return err + } + + if err := convert_fields_Selector_To_string(&in.EnhancedFieldSelector, &out.FieldSelector, s); err != nil { + return err + } + + labels := in.LabelSelector.DeepCopySelector() + requirements, _ := in.ExtraLabelSelector.Requirements() + labels.Add(requirements...) 
+ if err := metav1.Convert_labels_Selector_To_string(&labels, &out.ListOptions.LabelSelector, s); err != nil { + return err + } + + out.OwnerUID = in.OwnerUID + out.OwnerName = in.OwnerName + out.OwnerGroupResource = in.OwnerGroupResource.String() + out.OwnerSeniority = in.OwnerSeniority + + if err := convert_Slice_string_To_String(&in.Names, &out.Names, s); err != nil { + return err + } + if err := convert_Slice_string_To_String(&in.ClusterNames, &out.ClusterNames, s); err != nil { + return err + } + if err := convert_Slice_string_To_String(&in.Namespaces, &out.Namespaces, s); err != nil { + return err + } + if err := convert_pedia_Slice_orderby_To_String(&in.OrderBy, &out.OrderBy, s); err != nil { + return err + } + + out.WithContinue = in.WithContinue + out.WithRemainingCount = in.WithRemainingCount + return nil +} + +func Convert_url_Values_To_v1beta1_ListOptions(in *url.Values, out *ListOptions, s conversion.Scope) error { + if err := metav1.Convert_url_Values_To_v1_ListOptions(in, &out.ListOptions, s); err != nil { + return err + } + // Save the native query parameters for use by listoptions. + out.urlQuery = *in + + return autoConvert_url_Values_To_v1beta1_ListOptions(in, out, s) +} + +func convert_String_To_Slice_string(in *string, out *[]string, scope conversion.Scope) error { + str := strings.TrimSpace(*in) + if str == "" { + *out = nil + return nil + } + + *out = strings.Split(str, ",") + return nil +} + +func convert_Slice_string_To_String(in *[]string, out *string, scope conversion.Scope) error { + if len(*in) == 0 { + *out = "" + return nil + } + *out = strings.Join(*in, ",") + return nil +} + +func convert_String_To_Pointer_metav1_Time(in *string, out **metav1.Time, scope conversion.Scope) error { + str := strings.TrimSpace(*in) + if len(str) == 0 { + return nil + } + + var err error + var t time.Time + switch { + case strings.Contains(str, "T"): + // If the query parameter contains "+", it will be parsed into " ". 
+ // The query parameter need to be encoded. + t, err = time.Parse(time.RFC3339, *in) + case strings.Contains(str, " "): + t, err = time.Parse("2006-01-02 15:04:05", *in) + case strings.Contains(str, "-"): + t, err = time.Parse("2006-01-02", *in) + default: + var timestamp int64 + timestamp, err = strconv.ParseInt(*in, 10, 64) + if err != nil { + break + } + + switch len(str) { + case 10: + t = time.Unix(timestamp, 0) + case 13: + t = time.Unix(timestamp/1e3, (timestamp%1e3)*1e6) + default: + return errors.New("Invalid timestamp: only timestamps with string lengths of 10(as s) and 13(as ms) are supported") + } + } + if err != nil { + return fmt.Errorf("Invalid datetime: %s, a valid datetime format: RFC3339, Datetime(2006-01-02 15:04:05), Date(2006-01-02), Unix Timestamp", *in) + } + *out = &metav1.Time{Time: t} + return nil +} + +func convert_Slice_string_To_clusterpedia_Slice_orderby(in *[]string, out *[]clusterpedia.OrderBy, descSep string, s conversion.Scope) error { + if len(*in) == 0 { + return nil + } + + for _, o := range *in { + sli := strings.Split(strings.TrimSpace(o), descSep) + switch len(sli) { + case 0: + continue + case 1: + *out = append(*out, clusterpedia.OrderBy{Field: sli[0]}) + continue + default: + } + + var desc bool + if sli[len(sli)-1] == "desc" { + desc = true + sli = sli[:len(sli)-1] + } + + // if descSep is " ", `orderby` can only be 'field' or 'field desc' + // example invalid `orderby`: 'field1 field2', 'field1 field2 desc' + if descSep == " " && len(sli) > 1 { + return errors.New("Invalid Query OrderBy") + } + + field := strings.Join(sli, descSep) + *out = append(*out, clusterpedia.OrderBy{Field: field, Desc: desc}) + } + return nil +} + +func convert_pedia_Slice_orderby_To_String(in *[]clusterpedia.OrderBy, out *string, s conversion.Scope) error { + if len(*in) == 0 { + return nil + } + + sliOrderBy := make([]string, len(*in)) + for i, orderby := range *in { + str := orderby.Field + if orderby.Desc { + str += " desc" + } + 
sliOrderBy[i] = str + } + + if err := convert_Slice_string_To_String(&sliOrderBy, out, s); err != nil { + return err + } + return nil +} + +func convert_string_To_fields_Selector(in *string, out *fields.Selector, s conversion.Scope) error { + selector, err := fields.Parse(*in) + if err != nil { + return err + } + *out = selector + return nil +} + +func convert_fields_Selector_To_string(in *fields.Selector, out *string, s conversion.Scope) error { + if *in == nil { + return nil + } + *out = (*in).String() + return nil +} + +// nolint:unused +func compileErrorOnMissingConversion() {} diff --git a/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/doc.go b/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/doc.go new file mode 100644 index 000000000..27ed18eba --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/doc.go @@ -0,0 +1,8 @@ +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=github.com/clusterpedia-io/api/clusterpedia +// +k8s:defaulter-gen=TypeMeta +// +groupName=clusterpedia.io + +// Package v1beta1 is the v1beta1 version of the API +package v1beta1 diff --git a/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/register.go b/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/register.go new file mode 100644 index 000000000..a55ab7197 --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/register.go @@ -0,0 +1,56 @@ +package v1beta1 + +import ( + internal "github.com/clusterpedia-io/api/clusterpedia" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var SchemeGroupVersion = schema.GroupVersion{Group: internal.GroupName, Version: "v1beta1"} + +var ( + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + Install = localSchemeBuilder.AddToScheme +) + +func init() { + 
localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CollectionResource{}, + &CollectionResourceList{}, + &Resources{}, + &ListOptions{}, + + &metav1.GetOptions{}, + &metav1.DeleteOptions{}, + &metav1.CreateOptions{}, + &metav1.UpdateOptions{}, + &metav1.PatchOptions{}, + ) + + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind(metav1.WatchEventKind), &metav1.WatchEvent{}) + scheme.AddKnownTypeWithName( + schema.GroupVersion{Group: SchemeGroupVersion.Group, Version: runtime.APIVersionInternal}.WithKind(metav1.WatchEventKind), + &metav1.InternalEvent{}, + ) + + scheme.AddUnversionedTypes(metav1.Unversioned, + &metav1.Status{}, + &metav1.APIVersions{}, + &metav1.APIGroupList{}, + &metav1.APIGroup{}, + &metav1.APIResourceList{}, + ) + + utilruntime.Must(metav1.RegisterConversions(scheme)) + utilruntime.Must(metav1.RegisterDefaults(scheme)) + + // metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/types.go b/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/types.go new file mode 100644 index 000000000..25a827d17 --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/types.go @@ -0,0 +1,110 @@ +package v1beta1 + +import ( + "net/url" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ListOptions struct { + metav1.ListOptions `json:",inline"` + + // +optional + Names string `json:"names,omitempty"` + + // +optional + ClusterNames string `json:"clusters,omitempty"` + + // +optional + Namespaces string `json:"namespaces,omitempty"` + + // +optional + OrderBy string `json:"orderby,omitempty"` + + // +optional + OwnerUID string `json:"ownerUID,omitempty"` + + // 
+optional + OwnerName string `json:"ownerName,omitempty"` + + // +optional + Since string `json:"since,omitempty"` + + // +optional + Before string `json:"before,omitempty"` + + // +optional + OwnerGroupResource string `json:"ownerGR,omitempty"` + + // +optional + OwnerSeniority int `json:"ownerSeniority,omitempty"` + + // +optional + WithContinue *bool `json:"withContinue,omitempty"` + + // +optional + WithRemainingCount *bool `json:"withRemainingCount,omitempty"` + + // +optional + OnlyMetadata bool `json:"onlyMetadata,omitempty"` + + urlQuery url.Values +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type Resources struct { + metav1.TypeMeta `json:",inline"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true + +type CollectionResource struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +required + ResourceTypes []CollectionResourceType `json:"resourceTypes"` + + // +optional + Items []runtime.RawExtension `json:"items,omitempty"` + + // +optional + Continue string `json:"continue,omitempty"` + + // +optional + RemainingItemCount *int64 `json:"remainingItemCount,omitempty"` +} + +type CollectionResourceType struct { + Group string `json:"group"` + + Version string `json:"version"` + + // +optional + Kind string `json:"kind,omitempty"` + + Resource string `json:"resource"` +} + +// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true + +type CollectionResourceList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []CollectionResource `json:"items"` +} diff --git a/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/zz_generated.conversion.go b/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/zz_generated.conversion.go new file mode 100644 index 
000000000..e0c81f294 --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/zz_generated.conversion.go @@ -0,0 +1,338 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1beta1 + +import ( + url "net/url" + unsafe "unsafe" + + clusterpedia "github.com/clusterpedia-io/api/clusterpedia" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*CollectionResource)(nil), (*clusterpedia.CollectionResource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CollectionResource_To_clusterpedia_CollectionResource(a.(*CollectionResource), b.(*clusterpedia.CollectionResource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clusterpedia.CollectionResource)(nil), (*CollectionResource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clusterpedia_CollectionResource_To_v1beta1_CollectionResource(a.(*clusterpedia.CollectionResource), b.(*CollectionResource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CollectionResourceList)(nil), (*clusterpedia.CollectionResourceList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CollectionResourceList_To_clusterpedia_CollectionResourceList(a.(*CollectionResourceList), b.(*clusterpedia.CollectionResourceList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clusterpedia.CollectionResourceList)(nil), (*CollectionResourceList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_clusterpedia_CollectionResourceList_To_v1beta1_CollectionResourceList(a.(*clusterpedia.CollectionResourceList), b.(*CollectionResourceList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*CollectionResourceType)(nil), (*clusterpedia.CollectionResourceType)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CollectionResourceType_To_clusterpedia_CollectionResourceType(a.(*CollectionResourceType), b.(*clusterpedia.CollectionResourceType), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clusterpedia.CollectionResourceType)(nil), (*CollectionResourceType)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clusterpedia_CollectionResourceType_To_v1beta1_CollectionResourceType(a.(*clusterpedia.CollectionResourceType), b.(*CollectionResourceType), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1beta1_ListOptions(a.(*url.Values), b.(*ListOptions), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*clusterpedia.ListOptions)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clusterpedia_ListOptions_To_v1beta1_ListOptions(a.(*clusterpedia.ListOptions), b.(*ListOptions), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*url.Values)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1beta1_ListOptions(a.(*url.Values), b.(*ListOptions), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*ListOptions)(nil), (*clusterpedia.ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ListOptions_To_clusterpedia_ListOptions(a.(*ListOptions), 
b.(*clusterpedia.ListOptions), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_CollectionResource_To_clusterpedia_CollectionResource(in *CollectionResource, out *clusterpedia.CollectionResource, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.ResourceTypes = *(*[]clusterpedia.CollectionResourceType)(unsafe.Pointer(&in.ResourceTypes)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + out.Continue = in.Continue + out.RemainingItemCount = (*int64)(unsafe.Pointer(in.RemainingItemCount)) + return nil +} + +// Convert_v1beta1_CollectionResource_To_clusterpedia_CollectionResource is an autogenerated conversion function. +func Convert_v1beta1_CollectionResource_To_clusterpedia_CollectionResource(in *CollectionResource, out *clusterpedia.CollectionResource, s conversion.Scope) error { + return autoConvert_v1beta1_CollectionResource_To_clusterpedia_CollectionResource(in, out, s) +} + +func autoConvert_clusterpedia_CollectionResource_To_v1beta1_CollectionResource(in *clusterpedia.CollectionResource, out *CollectionResource, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.ResourceTypes = *(*[]CollectionResourceType)(unsafe.Pointer(&in.ResourceTypes)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + out.Continue = in.Continue + out.RemainingItemCount = (*int64)(unsafe.Pointer(in.RemainingItemCount)) + return nil +} + +// Convert_clusterpedia_CollectionResource_To_v1beta1_CollectionResource is an autogenerated conversion function. 
+func Convert_clusterpedia_CollectionResource_To_v1beta1_CollectionResource(in *clusterpedia.CollectionResource, out *CollectionResource, s conversion.Scope) error { + return autoConvert_clusterpedia_CollectionResource_To_v1beta1_CollectionResource(in, out, s) +} + +func autoConvert_v1beta1_CollectionResourceList_To_clusterpedia_CollectionResourceList(in *CollectionResourceList, out *clusterpedia.CollectionResourceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]clusterpedia.CollectionResource, len(*in)) + for i := range *in { + if err := Convert_v1beta1_CollectionResource_To_clusterpedia_CollectionResource(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1beta1_CollectionResourceList_To_clusterpedia_CollectionResourceList is an autogenerated conversion function. +func Convert_v1beta1_CollectionResourceList_To_clusterpedia_CollectionResourceList(in *CollectionResourceList, out *clusterpedia.CollectionResourceList, s conversion.Scope) error { + return autoConvert_v1beta1_CollectionResourceList_To_clusterpedia_CollectionResourceList(in, out, s) +} + +func autoConvert_clusterpedia_CollectionResourceList_To_v1beta1_CollectionResourceList(in *clusterpedia.CollectionResourceList, out *CollectionResourceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CollectionResource, len(*in)) + for i := range *in { + if err := Convert_clusterpedia_CollectionResource_To_v1beta1_CollectionResource(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_clusterpedia_CollectionResourceList_To_v1beta1_CollectionResourceList is an autogenerated conversion function. 
+func Convert_clusterpedia_CollectionResourceList_To_v1beta1_CollectionResourceList(in *clusterpedia.CollectionResourceList, out *CollectionResourceList, s conversion.Scope) error { + return autoConvert_clusterpedia_CollectionResourceList_To_v1beta1_CollectionResourceList(in, out, s) +} + +func autoConvert_v1beta1_CollectionResourceType_To_clusterpedia_CollectionResourceType(in *CollectionResourceType, out *clusterpedia.CollectionResourceType, s conversion.Scope) error { + out.Group = in.Group + out.Version = in.Version + out.Kind = in.Kind + out.Resource = in.Resource + return nil +} + +// Convert_v1beta1_CollectionResourceType_To_clusterpedia_CollectionResourceType is an autogenerated conversion function. +func Convert_v1beta1_CollectionResourceType_To_clusterpedia_CollectionResourceType(in *CollectionResourceType, out *clusterpedia.CollectionResourceType, s conversion.Scope) error { + return autoConvert_v1beta1_CollectionResourceType_To_clusterpedia_CollectionResourceType(in, out, s) +} + +func autoConvert_clusterpedia_CollectionResourceType_To_v1beta1_CollectionResourceType(in *clusterpedia.CollectionResourceType, out *CollectionResourceType, s conversion.Scope) error { + out.Group = in.Group + out.Version = in.Version + out.Kind = in.Kind + out.Resource = in.Resource + return nil +} + +// Convert_clusterpedia_CollectionResourceType_To_v1beta1_CollectionResourceType is an autogenerated conversion function. 
+func Convert_clusterpedia_CollectionResourceType_To_v1beta1_CollectionResourceType(in *clusterpedia.CollectionResourceType, out *CollectionResourceType, s conversion.Scope) error { + return autoConvert_clusterpedia_CollectionResourceType_To_v1beta1_CollectionResourceType(in, out, s) +} + +func autoConvert_v1beta1_ListOptions_To_clusterpedia_ListOptions(in *ListOptions, out *clusterpedia.ListOptions, s conversion.Scope) error { + // FIXME: Provide conversion function to convert v1.ListOptions to internalversion.ListOptions + compileErrorOnMissingConversion() + // WARNING: in.Names requires manual conversion: inconvertible types (string vs []string) + // WARNING: in.ClusterNames requires manual conversion: inconvertible types (string vs []string) + // WARNING: in.Namespaces requires manual conversion: inconvertible types (string vs []string) + // WARNING: in.OrderBy requires manual conversion: inconvertible types (string vs []github.com/clusterpedia-io/api/clusterpedia.OrderBy) + out.OwnerUID = in.OwnerUID + out.OwnerName = in.OwnerName + // WARNING: in.Since requires manual conversion: inconvertible types (string vs *k8s.io/apimachinery/pkg/apis/meta/v1.Time) + // WARNING: in.Before requires manual conversion: inconvertible types (string vs *k8s.io/apimachinery/pkg/apis/meta/v1.Time) + // WARNING: in.OwnerGroupResource requires manual conversion: inconvertible types (string vs k8s.io/apimachinery/pkg/runtime/schema.GroupResource) + out.OwnerSeniority = in.OwnerSeniority + out.WithContinue = (*bool)(unsafe.Pointer(in.WithContinue)) + out.WithRemainingCount = (*bool)(unsafe.Pointer(in.WithRemainingCount)) + out.OnlyMetadata = in.OnlyMetadata + // WARNING: in.urlQuery requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_clusterpedia_ListOptions_To_v1beta1_ListOptions(in *clusterpedia.ListOptions, out *ListOptions, s conversion.Scope) error { + // FIXME: Provide conversion function to convert internalversion.ListOptions to 
v1.ListOptions + compileErrorOnMissingConversion() + if err := runtime.Convert_Slice_string_To_string(&in.Names, &out.Names, s); err != nil { + return err + } + if err := runtime.Convert_Slice_string_To_string(&in.ClusterNames, &out.ClusterNames, s); err != nil { + return err + } + if err := runtime.Convert_Slice_string_To_string(&in.Namespaces, &out.Namespaces, s); err != nil { + return err + } + // WARNING: in.OrderBy requires manual conversion: inconvertible types ([]github.com/clusterpedia-io/api/clusterpedia.OrderBy vs string) + out.OwnerName = in.OwnerName + out.OwnerUID = in.OwnerUID + // WARNING: in.OwnerGroupResource requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/runtime/schema.GroupResource vs string) + out.OwnerSeniority = in.OwnerSeniority + // WARNING: in.Since requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.Time vs string) + // WARNING: in.Before requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.Time vs string) + out.WithContinue = (*bool)(unsafe.Pointer(in.WithContinue)) + out.WithRemainingCount = (*bool)(unsafe.Pointer(in.WithRemainingCount)) + // WARNING: in.EnhancedFieldSelector requires manual conversion: does not exist in peer-type + // WARNING: in.ExtraLabelSelector requires manual conversion: does not exist in peer-type + // WARNING: in.URLQuery requires manual conversion: does not exist in peer-type + out.OnlyMetadata = in.OnlyMetadata + return nil +} + +func autoConvert_url_Values_To_v1beta1_ListOptions(in *url.Values, out *ListOptions, s conversion.Scope) error { + // WARNING: Field ListOptions does not have json tag, skipping. 
+ + if values, ok := map[string][]string(*in)["names"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Names, s); err != nil { + return err + } + } else { + out.Names = "" + } + if values, ok := map[string][]string(*in)["clusters"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.ClusterNames, s); err != nil { + return err + } + } else { + out.ClusterNames = "" + } + if values, ok := map[string][]string(*in)["namespaces"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Namespaces, s); err != nil { + return err + } + } else { + out.Namespaces = "" + } + if values, ok := map[string][]string(*in)["orderby"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.OrderBy, s); err != nil { + return err + } + } else { + out.OrderBy = "" + } + if values, ok := map[string][]string(*in)["ownerUID"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.OwnerUID, s); err != nil { + return err + } + } else { + out.OwnerUID = "" + } + if values, ok := map[string][]string(*in)["ownerName"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.OwnerName, s); err != nil { + return err + } + } else { + out.OwnerName = "" + } + if values, ok := map[string][]string(*in)["since"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Since, s); err != nil { + return err + } + } else { + out.Since = "" + } + if values, ok := map[string][]string(*in)["before"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Before, s); err != nil { + return err + } + } else { + out.Before = "" + } + if values, ok := map[string][]string(*in)["ownerGR"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.OwnerGroupResource, s); err != nil { + return err + } + } else 
{ + out.OwnerGroupResource = "" + } + if values, ok := map[string][]string(*in)["ownerSeniority"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_int(&values, &out.OwnerSeniority, s); err != nil { + return err + } + } else { + out.OwnerSeniority = 0 + } + if values, ok := map[string][]string(*in)["withContinue"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.WithContinue, s); err != nil { + return err + } + } else { + out.WithContinue = nil + } + if values, ok := map[string][]string(*in)["withRemainingCount"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.WithRemainingCount, s); err != nil { + return err + } + } else { + out.WithRemainingCount = nil + } + if values, ok := map[string][]string(*in)["onlyMetadata"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.OnlyMetadata, s); err != nil { + return err + } + } else { + out.OnlyMetadata = false + } + // WARNING: Field urlQuery does not have json tag, skipping. + + return nil +} diff --git a/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..710cd4bf2 --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/clusterpedia/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,179 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + url "net/url" + + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CollectionResource) DeepCopyInto(out *CollectionResource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]CollectionResourceType, len(*in)) + copy(*out, *in) + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RemainingItemCount != nil { + in, out := &in.RemainingItemCount, &out.RemainingItemCount + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectionResource. +func (in *CollectionResource) DeepCopy() *CollectionResource { + if in == nil { + return nil + } + out := new(CollectionResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CollectionResource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CollectionResourceList) DeepCopyInto(out *CollectionResourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CollectionResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectionResourceList. 
+func (in *CollectionResourceList) DeepCopy() *CollectionResourceList { + if in == nil { + return nil + } + out := new(CollectionResourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CollectionResourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CollectionResourceType) DeepCopyInto(out *CollectionResourceType) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectionResourceType. +func (in *CollectionResourceType) DeepCopy() *CollectionResourceType { + if in == nil { + return nil + } + out := new(CollectionResourceType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListOptions) DeepCopyInto(out *ListOptions) { + *out = *in + in.ListOptions.DeepCopyInto(&out.ListOptions) + if in.WithContinue != nil { + in, out := &in.WithContinue, &out.WithContinue + *out = new(bool) + **out = **in + } + if in.WithRemainingCount != nil { + in, out := &in.WithRemainingCount, &out.WithRemainingCount + *out = new(bool) + **out = **in + } + if in.urlQuery != nil { + in, out := &in.urlQuery, &out.urlQuery + *out = make(url.Values, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions. 
+func (in *ListOptions) DeepCopy() *ListOptions { + if in == nil { + return nil + } + out := new(ListOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ListOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Resources) DeepCopyInto(out *Resources) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resources. +func (in *Resources) DeepCopy() *Resources { + if in == nil { + return nil + } + out := new(Resources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Resources) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/clusterpedia-io/api/clusterpedia/zz_generated.deepcopy.go b/vendor/github.com/clusterpedia-io/api/clusterpedia/zz_generated.deepcopy.go new file mode 100644 index 000000000..1e5aab42f --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/clusterpedia/zz_generated.deepcopy.go @@ -0,0 +1,207 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package clusterpedia + +import ( + url "net/url" + + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CollectionResource) DeepCopyInto(out *CollectionResource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.ResourceTypes != nil { + in, out := &in.ResourceTypes, &out.ResourceTypes + *out = make([]CollectionResourceType, len(*in)) + copy(*out, *in) + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if (*in)[i] != nil { + (*out)[i] = (*in)[i].DeepCopyObject() + } + } + } + if in.RemainingItemCount != nil { + in, out := &in.RemainingItemCount, &out.RemainingItemCount + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectionResource. +func (in *CollectionResource) DeepCopy() *CollectionResource { + if in == nil { + return nil + } + out := new(CollectionResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CollectionResource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CollectionResourceList) DeepCopyInto(out *CollectionResourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CollectionResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectionResourceList. 
+func (in *CollectionResourceList) DeepCopy() *CollectionResourceList { + if in == nil { + return nil + } + out := new(CollectionResourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CollectionResourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CollectionResourceType) DeepCopyInto(out *CollectionResourceType) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectionResourceType. +func (in *CollectionResourceType) DeepCopy() *CollectionResourceType { + if in == nil { + return nil + } + out := new(CollectionResourceType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListOptions) DeepCopyInto(out *ListOptions) { + *out = *in + in.ListOptions.DeepCopyInto(&out.ListOptions) + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ClusterNames != nil { + in, out := &in.ClusterNames, &out.ClusterNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OrderBy != nil { + in, out := &in.OrderBy, &out.OrderBy + *out = make([]OrderBy, len(*in)) + copy(*out, *in) + } + out.OwnerGroupResource = in.OwnerGroupResource + if in.Since != nil { + in, out := &in.Since, &out.Since + *out = (*in).DeepCopy() + } + if in.Before != nil { + in, out := &in.Before, &out.Before + *out = (*in).DeepCopy() + } + if in.WithContinue != nil { + in, out := &in.WithContinue, &out.WithContinue + *out = new(bool) + **out = **in + } + if in.WithRemainingCount != nil { + in, out := &in.WithRemainingCount, &out.WithRemainingCount + *out = new(bool) + **out = **in + } + if in.EnhancedFieldSelector != nil { + out.EnhancedFieldSelector = in.EnhancedFieldSelector.DeepCopySelector() + } + if in.ExtraLabelSelector != nil { + out.ExtraLabelSelector = in.ExtraLabelSelector.DeepCopySelector() + } + if in.URLQuery != nil { + in, out := &in.URLQuery, &out.URLQuery + *out = make(url.Values, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions. 
+func (in *ListOptions) DeepCopy() *ListOptions { + if in == nil { + return nil + } + out := new(ListOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ListOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrderBy) DeepCopyInto(out *OrderBy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrderBy. +func (in *OrderBy) DeepCopy() *OrderBy { + if in == nil { + return nil + } + out := new(OrderBy) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/clusterpedia-io/api/policy/v1alpha1/doc.go b/vendor/github.com/clusterpedia-io/api/policy/v1alpha1/doc.go new file mode 100644 index 000000000..ebfdecfc8 --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/policy/v1alpha1/doc.go @@ -0,0 +1,6 @@ +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +groupName=policy.clusterpedia.io + +// Package v1alpha1 is the v1alpha1 version of the API +package v1alpha1 diff --git a/vendor/github.com/clusterpedia-io/api/policy/v1alpha1/register.go b/vendor/github.com/clusterpedia-io/api/policy/v1alpha1/register.go new file mode 100644 index 000000000..bd5a6bd85 --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/policy/v1alpha1/register.go @@ -0,0 +1,51 @@ +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName specifies the group name used to register the objects. +const GroupName = "policy.clusterpedia.io" + +// GroupVersion specifies the group and the version used to register the objects. 
+var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// SchemeGroupVersion is group version used to register these objects +// Deprecated: use GroupVersion instead. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // Deprecated: use Install instead + AddToScheme = localSchemeBuilder.AddToScheme + Install = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ClusterImportPolicy{}, + &ClusterImportPolicyList{}, + &PediaClusterLifecycle{}, + &PediaClusterLifecycleList{}, + ) + // AddToGroupVersion allows the serialization of client types like ListOptions. 
+ v1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/clusterpedia-io/api/policy/v1alpha1/types.go b/vendor/github.com/clusterpedia-io/api/policy/v1alpha1/types.go new file mode 100644 index 000000000..c23e99e41 --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/policy/v1alpha1/types.go @@ -0,0 +1,347 @@ +package v1alpha1 + +import ( + "bytes" + "errors" + "fmt" + "strings" + "text/template" + + "github.com/Masterminds/sprig/v3" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + PolicyValidatedCondition = "Validated" + PolicyReconcilingCondition = "Reconciling" + + LifecycleCreatedCondition = "Created" + LifecycleUpdatingCondition = "Updating" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope="Cluster" +// +kubebuilder:printcolumn:name="Validated",type=string,JSONPath=".status.conditions[?(@.type == 'Validated')].reason" +// +kubebuilder:printcolumn:name="Reconciling",type=string,JSONPath=".status.conditions[?(@.type == 'Reconciling')].reason" +type ClusterImportPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec ClusterImportPolicySpec `json:"spec,omitempty"` + + // +optional + Status ClusterImportPolicyStatus `json:"status,omitempty"` +} + +type ClusterImportPolicySpec struct { + // +required + // +kubebuilder:validation:Required + Source SourceType `json:"source"` + + // +optional + // +listType=map + // +listMapKey=key + References []IntendReferenceResourceTemplate `json:"references,omitempty"` + + // +required + // +kubebuilder:validation:Required + NameTemplate LifecycleNameTemplate `json:"nameTemplate"` + + Policy `json:",inline"` +} + +type LifecycleNameTemplate string + +func (t LifecycleNameTemplate) Template()
(*template.Template, error) { + return newTemplate("lifecycle-name", string(t)) +} + +type ClusterImportPolicyStatus struct { + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope="Cluster" +// +kubebuilder:printcolumn:name="Created",type=string,JSONPath=".status.conditions[?(@.type == 'Created')].reason" +// +kubebuilder:printcolumn:name="Updating",type=string,JSONPath=".status.conditions[?(@.type == 'Updating')].reason" +type PediaClusterLifecycle struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec PediaClusterLifecycleSpec `json:"spec,omitempty"` + + // +optional + Status PediaClusterLifecycleStatus `json:"status,omitempty"` +} + +type PediaClusterLifecycleSpec struct { + // +required + // +kubebuilder:validation:Required + Source DependentResource `json:"source"` + + // +optional + // +listType=map + // +listMapKey=key + References []ReferenceResourceTemplate `json:"references,omitempty"` + + Policy `json:",inline"` +} + +type PediaClusterLifecycleStatus struct { + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // +optional + References []DependentResource `json:"references"` +} + +type Policy struct { + // +required + // +kubebuilder:validation:Required + Template string `json:"template"` + + // +required + // +kubebuilder:validation:Required + CreationCondition string `json:"creationCondition"` + + /* + // +required + // +kubebuilder:validation:Required + UpdateTemplate string `json:"updateTemplate,omitempty"` + + // +optional + DeletionCondition string `json:"deletionCondition,omitempty"` + */ +} + +func (policy Policy) Validate() (errs []error) { + if _, err := newTemplate("", policy.Template); err != nil { + errs = append(errs, 
err) + } + if _, err := newTemplate("", policy.CreationCondition); err != nil { + errs = append(errs, err) + } + + return errs +} + +func (policy Policy) CouldCreate(writer *bytes.Buffer, data interface{}) (bool, error) { + tmpl, err := newTemplate("creationcondition", policy.CreationCondition) + if err != nil { + return false, err + } + + writer.Reset() + if err := tmpl.Execute(writer, data); err != nil { + return false, err + } + return strings.TrimSpace(strings.ToLower(replaceNoValue(writer.String()))) == "true", nil +} + +func (policy Policy) ResolvePediaCluster(writer *bytes.Buffer, data interface{}) ([]byte, error) { + tmpl, err := newTemplate("pediacluster", policy.Template) + if err != nil { + return nil, err + } + + writer.Reset() + if err := tmpl.Execute(writer, data); err != nil { + return nil, err + } + return bytes.ReplaceAll(writer.Bytes(), []byte("<no value>"), []byte("")), nil +} + +type BaseReferenceResourceTemplate struct { + // +required + // +kubebuilder:validation:Required + Key string `json:"key"` + + // +required + // +kubebuilder:validation:Required + Group string `json:"group"` + + // +required + // +kubebuilder:validation:Required + Resource string `json:"resource"` + + // +optional + NamespaceTemplate string `json:"namespaceTemplate,omitempty"` + + // +required + // +kubebuilder:validation:Required + NameTemplate string `json:"nameTemplate"` +} + +func (ref BaseReferenceResourceTemplate) GroupResource() schema.GroupResource { + return schema.GroupResource{Group: ref.Group, Resource: ref.Resource} +} + +func (ref BaseReferenceResourceTemplate) ResolveNamespaceAndName(writer *bytes.Buffer, data interface{}) (namespace, name string, err error) { + if ref.NamespaceTemplate != "" { + tmpl, err := newTemplate("references", ref.NamespaceTemplate) + if err != nil { + return "", "", fmt.Errorf("namespace: %w", err) + } + writer.Reset() + if err := tmpl.Execute(writer, data); err != nil { + return "", "", fmt.Errorf("namespace: %w", err) + } + namespace =
replaceNoValue(writer.String()) + } + + tmpl, err := newTemplate("references", ref.NameTemplate) + if err != nil { + return "", "", fmt.Errorf("name: %w", err) + } + writer.Reset() + if err := tmpl.Execute(writer, data); err != nil { + return "", "", fmt.Errorf("name: %w", err) + } + name = replaceNoValue(writer.String()) + return +} + +type IntendReferenceResourceTemplate struct { + BaseReferenceResourceTemplate `json:",inline"` + + // +optional + Versions []string `json:"versions,omitempty"` +} + +type ReferenceResourceTemplate struct { + BaseReferenceResourceTemplate `json:",inline"` + + // +required + // +kubebuilder:validation:Required + Version string `json:"version"` +} + +func (r ReferenceResourceTemplate) String() string { + strs := []string{r.Group, r.Version, r.Resource} + if r.NamespaceTemplate != "" { + strs = append(strs, r.NamespaceTemplate) + } + strs = append(strs, r.NameTemplate) + return strings.Join(strs, "/") +} + +func (r ReferenceResourceTemplate) GroupVersionResource() schema.GroupVersionResource { + return schema.GroupVersionResource{Group: r.Group, Version: r.Version, Resource: r.Resource} +} + +func (ref ReferenceResourceTemplate) Validate() (errs []error) { + if ref.Key == "" { + errs = append(errs, errors.New("reference resource key is required")) + } + + if ref.NameTemplate == "" { + errs = append(errs, errors.New("reference resource name is required")) + } + return +} + +func (ref ReferenceResourceTemplate) Resolve(writer *bytes.Buffer, data interface{}) (DependentResource, error) { + namespace, name, err := ref.ResolveNamespaceAndName(writer, data) + if err != nil { + return DependentResource{}, err + } + return DependentResource{Group: ref.Group, Version: ref.Version, Resource: ref.Resource, + Namespace: namespace, Name: name}, nil +} + +type SourceType struct { + // +required + // +kubebuilder:validation:Required + Group string `json:"group"` + + // +optional + Versions []string `json:"versions,omitempty"` + + // +required + // 
+kubebuilder:validation:Required + Resource string `json:"resource"` + + // +optional + SelectorTemplate SelectorTemplate `json:"selectorTemplate,omitempty"` +} + +func (st SourceType) GroupResource() schema.GroupResource { + return schema.GroupResource{Group: st.Group, Resource: st.Resource} +} + +type SelectorTemplate string + +func (t SelectorTemplate) Template() (*template.Template, error) { + return newTemplate("select-source", string(t)) +} + +type DependentResource struct { + // +required + // +kubebuilder:validation:Required + Group string `json:"group"` + + // +required + // +kubebuilder:validation:Required + Version string `json:"version"` + + // +required + // +kubebuilder:validation:Required + Resource string `json:"resource"` + + // +optional + Namespace string `json:"namespace"` + + // +required + // +kubebuilder:validation:Required + Name string `json:"name"` +} + +func (r DependentResource) GroupVersionResource() schema.GroupVersionResource { + return schema.GroupVersionResource{Group: r.Group, Version: r.Version, Resource: r.Resource} +} + +// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true + +type ClusterImportPolicyList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []ClusterImportPolicy `json:"items"` +} + +// +kubebuilder:object:root=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true + +type PediaClusterLifecycleList struct { + metav1.TypeMeta `json:",inline"` + + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []PediaClusterLifecycle `json:"items"` +} + +func newTemplate(name string, tmpltext string) (*template.Template, error) { + return template.New(name).Funcs(sprig.FuncMap()).Parse(tmpltext) +} + +func replaceNoValue(value string) string { + return strings.ReplaceAll(value, "<no value>", "") +} diff --git
a/vendor/github.com/clusterpedia-io/api/policy/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/clusterpedia-io/api/policy/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..cf3c97873 --- /dev/null +++ b/vendor/github.com/clusterpedia-io/api/policy/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,340 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaseReferenceResourceTemplate) DeepCopyInto(out *BaseReferenceResourceTemplate) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseReferenceResourceTemplate. +func (in *BaseReferenceResourceTemplate) DeepCopy() *BaseReferenceResourceTemplate { + if in == nil { + return nil + } + out := new(BaseReferenceResourceTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImportPolicy) DeepCopyInto(out *ClusterImportPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImportPolicy. +func (in *ClusterImportPolicy) DeepCopy() *ClusterImportPolicy { + if in == nil { + return nil + } + out := new(ClusterImportPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterImportPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImportPolicyList) DeepCopyInto(out *ClusterImportPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterImportPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImportPolicyList. +func (in *ClusterImportPolicyList) DeepCopy() *ClusterImportPolicyList { + if in == nil { + return nil + } + out := new(ClusterImportPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterImportPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImportPolicySpec) DeepCopyInto(out *ClusterImportPolicySpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + if in.References != nil { + in, out := &in.References, &out.References + *out = make([]IntendReferenceResourceTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.Policy = in.Policy + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImportPolicySpec. 
+func (in *ClusterImportPolicySpec) DeepCopy() *ClusterImportPolicySpec { + if in == nil { + return nil + } + out := new(ClusterImportPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImportPolicyStatus) DeepCopyInto(out *ClusterImportPolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImportPolicyStatus. +func (in *ClusterImportPolicyStatus) DeepCopy() *ClusterImportPolicyStatus { + if in == nil { + return nil + } + out := new(ClusterImportPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DependentResource) DeepCopyInto(out *DependentResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependentResource. +func (in *DependentResource) DeepCopy() *DependentResource { + if in == nil { + return nil + } + out := new(DependentResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntendReferenceResourceTemplate) DeepCopyInto(out *IntendReferenceResourceTemplate) { + *out = *in + out.BaseReferenceResourceTemplate = in.BaseReferenceResourceTemplate + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntendReferenceResourceTemplate. 
+func (in *IntendReferenceResourceTemplate) DeepCopy() *IntendReferenceResourceTemplate { + if in == nil { + return nil + } + out := new(IntendReferenceResourceTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PediaClusterLifecycle) DeepCopyInto(out *PediaClusterLifecycle) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PediaClusterLifecycle. +func (in *PediaClusterLifecycle) DeepCopy() *PediaClusterLifecycle { + if in == nil { + return nil + } + out := new(PediaClusterLifecycle) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PediaClusterLifecycle) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PediaClusterLifecycleList) DeepCopyInto(out *PediaClusterLifecycleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PediaClusterLifecycle, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PediaClusterLifecycleList. 
+func (in *PediaClusterLifecycleList) DeepCopy() *PediaClusterLifecycleList { + if in == nil { + return nil + } + out := new(PediaClusterLifecycleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PediaClusterLifecycleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PediaClusterLifecycleSpec) DeepCopyInto(out *PediaClusterLifecycleSpec) { + *out = *in + out.Source = in.Source + if in.References != nil { + in, out := &in.References, &out.References + *out = make([]ReferenceResourceTemplate, len(*in)) + copy(*out, *in) + } + out.Policy = in.Policy + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PediaClusterLifecycleSpec. +func (in *PediaClusterLifecycleSpec) DeepCopy() *PediaClusterLifecycleSpec { + if in == nil { + return nil + } + out := new(PediaClusterLifecycleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PediaClusterLifecycleStatus) DeepCopyInto(out *PediaClusterLifecycleStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.References != nil { + in, out := &in.References, &out.References + *out = make([]DependentResource, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PediaClusterLifecycleStatus. 
+func (in *PediaClusterLifecycleStatus) DeepCopy() *PediaClusterLifecycleStatus { + if in == nil { + return nil + } + out := new(PediaClusterLifecycleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReferenceResourceTemplate) DeepCopyInto(out *ReferenceResourceTemplate) { + *out = *in + out.BaseReferenceResourceTemplate = in.BaseReferenceResourceTemplate + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReferenceResourceTemplate. +func (in *ReferenceResourceTemplate) DeepCopy() *ReferenceResourceTemplate { + if in == nil { + return nil + } + out := new(ReferenceResourceTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceType) DeepCopyInto(out *SourceType) { + *out = *in + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceType. +func (in *SourceType) DeepCopy() *SourceType { + if in == nil { + return nil + } + out := new(SourceType) + in.DeepCopyInto(out) + return out +}