diff --git a/.golangci.yml b/.golangci.yml index 669b0575e..3b0be526c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -66,7 +66,7 @@ linters: enable: - bodyclose - deadcode - #- depguard + - depguard - dogsled - dupl - errcheck diff --git a/cmd/insights-operator/main.go b/cmd/insights-operator/main.go index 1cf31d597..a25d1280e 100644 --- a/cmd/insights-operator/main.go +++ b/cmd/insights-operator/main.go @@ -60,7 +60,6 @@ func NewOperatorCommand() *cobra.Command { cmd.AddCommand(start.NewOperator()) cmd.AddCommand(start.NewReceiver()) cmd.AddCommand(start.NewGather()) - cmd.AddCommand(start.NewGatherAndUpload()) return cmd } diff --git a/cmd/obfuscate-archive/main.go b/cmd/obfuscate-archive/main.go index e9cf85daf..464d09319 100644 --- a/cmd/obfuscate-archive/main.go +++ b/cmd/obfuscate-archive/main.go @@ -10,7 +10,6 @@ import ( "strings" configv1 "github.com/openshift/api/config/v1" - "github.com/openshift/api/insights/v1alpha1" "github.com/openshift/insights-operator/pkg/anonymization" "github.com/openshift/insights-operator/pkg/gather" "github.com/openshift/insights-operator/pkg/record" @@ -62,7 +61,7 @@ func obfuscateArchive(path string) (string, error) { return "", err } - anonymizer, err := anonymization.NewAnonymizer(clusterBaseDomain, networks, nil, nil, v1alpha1.ObfuscateNetworking) + anonymizer, err := anonymization.NewAnonymizer(clusterBaseDomain, networks, nil, nil, nil) if err != nil { return "", err } diff --git a/manifests/03-clusterrole.yaml b/manifests/03-clusterrole.yaml index 4c3bf3f02..3318805db 100644 --- a/manifests/03-clusterrole.yaml +++ b/manifests/03-clusterrole.yaml @@ -43,19 +43,6 @@ metadata: include.release.openshift.io/single-node-developer: "true" capability.openshift.io/name: Insights rules: - - apiGroups: - - "insights.openshift.io" - resources: - - datagathers - - datagathers/status - verbs: - - create - - get - - update - - patch - - list - - delete - - watch - apiGroups: - "operator.openshift.io" resources: @@ -390,22 +377,6 @@ rules: - pods verbs: - get - - apiGroups: - - batch - resources: - - jobs - verbs: - - create - - get - - list - - delete - - apiGroups: - - apps - resources: - - deployments - verbs: - - get - --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding diff --git a/manifests/05-prometheus_role.yaml b/manifests/03-prometheus_role.yaml similarity index 100% rename from manifests/05-prometheus_role.yaml rename to manifests/03-prometheus_role.yaml diff --git a/manifests/04-datagather-insights-crd.yaml b/manifests/04-datagather-insights-crd.yaml deleted file mode 100644 index e2645bcd9..000000000 --- a/manifests/04-datagather-insights-crd.yaml +++ /dev/null @@ -1,323 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1365 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: datagathers.insights.openshift.io -spec: - group: insights.openshift.io - names: - kind: DataGather - listKind: DataGatherList - plural: datagathers - singular: datagather - scope: Cluster - versions: - - additionalPrinterColumns: - - description: DataGather job state - jsonPath: .status.dataGatherState - name: State - type: string - - description: DataGather start time - jsonPath: .status.startTime - name: StartTime - type: date - - 
description: DataGather finish time - jsonPath: .status.finishTime - name: FinishTime - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: "DataGather provides data gather configuration options and status for the particular Insights data gathering. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - type: object - properties: - dataPolicy: - description: dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain in the Insights archive data. Valid values are "ClearText" and "ObfuscateNetworking". When set to ClearText the data is not obfuscated. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is ClearText. - type: string - enum: - - "" - - ClearText - - ObfuscateNetworking - gatherers: - description: 'gatherers is a list of gatherers configurations. The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: "oc get insightsoperators.operator.openshift.io cluster -o json | jq ''.status.gatherStatus.gatherers[].name''"' - type: array - items: - description: gathererConfig allows to configure specific gatherers - type: object - required: - - name - properties: - name: - description: name is the name of specific gatherer - type: string - state: - description: state allows you to configure specific gatherer. Valid values are "Enabled", "Disabled" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default. The current default is Enabled. - type: string - enum: - - "" - - Enabled - - Disabled - status: - description: status holds observed values from the cluster. They may not be overridden. - type: object - properties: - conditions: - description: conditions provide details on the status of the gatherer job. - type: array - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - type: object - required: - - lastTransitionTime - - message - - reason - - status - - type - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - type: string - format: date-time - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - type: string - maxLength: 32768 - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - type: integer - format: int64 - minimum: 0 - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - type: string - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - status: - description: status of the condition, one of True, False, Unknown. - type: string - enum: - - "True" - - "False" - - Unknown - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - type: string - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - dataGatherState: - description: dataGatherState reflects the current state of the data gathering process. - type: string - enum: - - Running - - Completed - - Failed - - Pending - x-kubernetes-validations: - - rule: '!(oldSelf == ''Running'' && self == ''Pending'')' - message: dataGatherState cannot transition from Running to Pending - - rule: '!(oldSelf == ''Completed'' && self == ''Pending'')' - message: dataGatherState cannot transition from Completed to Pending - - rule: '!(oldSelf == ''Failed'' && self == ''Pending'')' - message: dataGatherState cannot transition from Failed to Pending - - rule: '!(oldSelf == ''Completed'' && self == ''Running'')' - message: dataGatherState cannot transition from Completed to Running - - rule: '!(oldSelf == ''Failed'' && self == ''Running'')' - message: dataGatherState cannot transition from Failed to Running - finishTime: - description: finishTime is the time when Insights data gathering finished. 
- type: string - format: date-time - x-kubernetes-validations: - - rule: self == oldSelf - message: finishTime is immutable once set - gatherers: - description: gatherers is a list of active gatherers (and their statuses) in the last gathering. - type: array - items: - description: gathererStatus represents information about a particular data gatherer. - type: object - required: - - conditions - - lastGatherDuration - - name - properties: - conditions: - description: conditions provide details on the status of each gatherer. - type: array - minItems: 1 - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - type: object - required: - - lastTransitionTime - - message - - reason - - status - - type - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - type: string - format: date-time - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - type: string - maxLength: 32768 - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - type: integer - format: int64 - minimum: 0 - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - type: string - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - status: - description: status of the condition, one of True, False, Unknown. - type: string - enum: - - "True" - - "False" - - Unknown - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - type: string - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - lastGatherDuration: - description: lastGatherDuration represents the time spent gathering. - type: string - pattern: ^([1-9][0-9]*(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ - name: - description: name is the name of the gatherer. 
- type: string - maxLength: 256 - minLength: 5 - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - insightsReport: - description: insightsReport provides general Insights analysis results. When omitted, this means no data gathering has taken place yet or the corresponding Insights analysis (identified by "insightsRequestID") is not available. - type: object - properties: - downloadedAt: - description: downloadedAt is the time when the last Insights report was downloaded. An empty value means that there has not been any Insights report downloaded yet and it usually appears in disconnected clusters (or clusters when the Insights data gathering is disabled). - type: string - format: date-time - healthChecks: - description: healthChecks provides basic information about active Insights health checks in a cluster. - type: array - items: - description: healthCheck represents an Insights health check attributes. - type: object - required: - - advisorURI - - description - - state - - totalRisk - properties: - advisorURI: - description: advisorURI provides the URL link to the Insights Advisor. - type: string - pattern: ^https:\/\/\S+ - description: - description: description provides basic description of the healtcheck. - type: string - maxLength: 2048 - minLength: 10 - state: - description: state determines what the current state of the health check is. Health check is enabled by default and can be disabled by the user in the Insights advisor user interface. - type: string - enum: - - Enabled - - Disabled - totalRisk: - description: totalRisk of the healthcheck. Indicator of the total risk posed by the detected issue; combination of impact and likelihood. The values can be from 1 to 4, and the higher the number, the more important the issue. - type: integer - format: int32 - maximum: 4 - minimum: 1 - x-kubernetes-list-type: atomic - uri: - description: uri provides the URL link from which the report was downloaded. - type: string - pattern: ^https:\/\/\S+ - insightsRequestID: - description: insightsRequestID is an Insights request ID to track the status of the Insights analysis (in console.redhat.com processing pipeline) for the corresponding Insights data archive. - type: string - x-kubernetes-validations: - - rule: self == oldSelf - message: insightsRequestID is immutable once set - relatedObjects: - description: relatedObjects is a list of resources which are useful when debugging or inspecting the data gathering Pod - type: array - items: - description: ObjectReference contains enough information to let you inspect or modify the referred object. - type: object - required: - - group - - name - - resource - properties: - group: - description: 'group is the API Group of the Resource. Enter empty string for the core group. This value should consist of only lowercase alphanumeric characters, hyphens and periods. Example: "", "apps", "build.openshift.io", etc.' - type: string - pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - name: - description: name of the referent. - type: string - namespace: - description: namespace of the referent. - type: string - resource: - description: 'resource is the type that is being referenced. It is normally the plural form of the resource kind in lowercase. This value should consist of only lowercase alphanumeric characters and hyphens. Example: "deployments", "deploymentconfigs", "pods", etc.' 
- type: string - pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - startTime: - description: startTime is the time when Insights data gathering started. - type: string - format: date-time - x-kubernetes-validations: - - rule: self == oldSelf - message: startTime is immutable once set - x-kubernetes-validations: - - rule: (!has(oldSelf.insightsRequestID) || has(self.insightsRequestID)) - message: cannot remove insightsRequestID attribute from status - - rule: (!has(oldSelf.startTime) || has(self.startTime)) - message: cannot remove startTime attribute from status - - rule: (!has(oldSelf.finishTime) || has(self.finishTime)) - message: cannot remove finishTime attribute from status - - rule: (!has(oldSelf.dataGatherState) || has(self.dataGatherState)) - message: cannot remove dataGatherState attribute from status - served: true - storage: true - subresources: - status: {} diff --git a/manifests/03-serviceaccount.yaml b/manifests/04-serviceaccount.yaml similarity index 100% rename from manifests/03-serviceaccount.yaml rename to manifests/04-serviceaccount.yaml diff --git a/manifests/06-deployment-ibm-cloud-managed.yaml b/manifests/05-deployment-ibm-cloud-managed.yaml similarity index 100% rename from manifests/06-deployment-ibm-cloud-managed.yaml rename to manifests/05-deployment-ibm-cloud-managed.yaml diff --git a/manifests/06-deployment.yaml b/manifests/05-deployment.yaml similarity index 100% rename from manifests/06-deployment.yaml rename to manifests/05-deployment.yaml diff --git a/manifests/03-operator-crd.yaml b/manifests/08-operator-crd.yaml similarity index 100% rename from manifests/03-operator-crd.yaml rename to manifests/08-operator-crd.yaml diff --git a/manifests/04-operator-cr.yaml b/manifests/09-operator-cr.yaml similarity index 100% rename from manifests/04-operator-cr.yaml rename to manifests/09-operator-cr.yaml diff --git a/manifests/08-prometheus_rule.yaml b/manifests/10-prometheus_rule.yaml similarity index 100% rename from manifests/08-prometheus_rule.yaml rename to manifests/10-prometheus_rule.yaml diff --git a/manifests/09-servicemonitor.yaml b/manifests/11-servicemonitor.yaml similarity index 100% rename from manifests/09-servicemonitor.yaml rename to manifests/11-servicemonitor.yaml diff --git a/pkg/anonymization/anonymizer.go b/pkg/anonymization/anonymizer.go index 7380f6b8f..ace6e230f 100644 --- a/pkg/anonymization/anonymizer.go +++ b/pkg/anonymization/anonymizer.go @@ -33,7 +33,7 @@ import ( "sync" configv1 "github.com/openshift/api/config/v1" - "github.com/openshift/api/insights/v1alpha1" + "github.com/openshift/api/config/v1alpha1" networkv1 "github.com/openshift/api/network/v1" configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" networkv1client "github.com/openshift/client-go/network/clientset/versioned/typed/network/v1" @@ -88,7 +88,7 @@ type Anonymizer struct { ipNetworkRegex *regexp.Regexp secretsClient corev1client.SecretInterface secretConfigurator configobserver.Configurator - dataPolicy v1alpha1.DataPolicy + apiConfigurator configobserver.InsightsDataGatherObserver configClient configv1client.ConfigV1Interface networkClient networkv1client.NetworkV1Interface gatherKubeClient kubernetes.Interface @@ -104,7 +104,7 @@ func NewAnonymizer(clusterBaseDomain string, networks []string, secretsClient corev1client.SecretInterface, secretConfigurator configobserver.Configurator, - dataPolicy v1alpha1.DataPolicy) (*Anonymizer, error) { + apiConfigurator configobserver.InsightsDataGatherObserver) (*Anonymizer, error) { cidrs, err := 
k8snet.ParseCIDRs(networks) if err != nil { return nil, err @@ -126,7 +126,7 @@ func NewAnonymizer(clusterBaseDomain string, ipNetworkRegex: regexp.MustCompile(Ipv4AddressOrNetworkRegex), secretsClient: secretsClient, secretConfigurator: secretConfigurator, - dataPolicy: dataPolicy, + apiConfigurator: apiConfigurator, }, nil } @@ -138,14 +138,14 @@ func NewAnonymizerFromConfigClient( configClient configv1client.ConfigV1Interface, networkClient networkv1client.NetworkV1Interface, secretConfigurator configobserver.Configurator, - dataPolicy v1alpha1.DataPolicy, + apiConfigurator configobserver.InsightsDataGatherObserver, ) (*Anonymizer, error) { baseDomain, err := utils.GetClusterBaseDomain(ctx, configClient) if err != nil { return nil, err } secretsClient := kubeClient.CoreV1().Secrets(secretNamespace) - a, err := NewAnonymizer(baseDomain, []string{}, secretsClient, secretConfigurator, dataPolicy) + a, err := NewAnonymizer(baseDomain, []string{}, secretsClient, secretConfigurator, apiConfigurator) if err != nil { return nil, err } @@ -322,7 +322,7 @@ func NewAnonymizerFromConfig( gatherProtoKubeConfig *rest.Config, protoKubeConfig *rest.Config, secretConfigurator configobserver.Configurator, - dataPolicy v1alpha1.DataPolicy, + apiConfigurator configobserver.InsightsDataGatherObserver, ) (*Anonymizer, error) { kubeClient, err := kubernetes.NewForConfig(protoKubeConfig) if err != nil { @@ -344,7 +344,7 @@ func NewAnonymizerFromConfig( return nil, err } - return NewAnonymizerFromConfigClient(ctx, kubeClient, gatherKubeClient, configClient, networkClient, secretConfigurator, dataPolicy) + return NewAnonymizerFromConfigClient(ctx, kubeClient, gatherKubeClient, configClient, networkClient, secretConfigurator, apiConfigurator) } // AnonymizeMemoryRecord takes record.MemoryRecord, removes the sensitive data from it and returns the same object @@ -484,8 +484,8 @@ func (anonymizer *Anonymizer) IsObfuscationEnabled() bool { if anonymizer.secretConfigurator.Config().EnableGlobalObfuscation { return true } - if anonymizer.dataPolicy != "" { - return anonymizer.dataPolicy == v1alpha1.ObfuscateNetworking + if anonymizer.apiConfigurator != nil { + return *anonymizer.apiConfigurator.GatherDataPolicy() == v1alpha1.ObfuscateNetworking } return false } diff --git a/pkg/anonymization/anonymizer_test.go b/pkg/anonymization/anonymizer_test.go index ced16d9de..809ea26ed 100644 --- a/pkg/anonymization/anonymizer_test.go +++ b/pkg/anonymization/anonymizer_test.go @@ -7,7 +7,7 @@ import ( "testing" configv1 "github.com/openshift/api/config/v1" - "github.com/openshift/api/insights/v1alpha1" + "github.com/openshift/api/config/v1alpha1" networkv1 "github.com/openshift/api/network/v1" configfake "github.com/openshift/client-go/config/clientset/versioned/fake" networkfake "github.com/openshift/client-go/network/clientset/versioned/fake" @@ -123,8 +123,11 @@ func getAnonymizer(t *testing.T) *Anonymizer { mockSecretConfigurator := config.NewMockSecretConfigurator(&config.Controller{ EnableGlobalObfuscation: true, }) + mockAPIConfigurator := config.NewMockAPIConfigurator(&v1alpha1.GatherConfig{ + DataPolicy: v1alpha1.ObfuscateNetworking, + }) anonymizer, err := NewAnonymizer(clusterBaseDomain, - networks, kubefake.NewSimpleClientset().CoreV1().Secrets(secretNamespace), mockSecretConfigurator, v1alpha1.ObfuscateNetworking) + networks, kubefake.NewSimpleClientset().CoreV1().Secrets(secretNamespace), mockSecretConfigurator, mockAPIConfigurator) assert.NoError(t, err) return anonymizer @@ -329,7 +332,10 @@ func 
TestAnonymizer_NewAnonymizerFromConfigClient(t *testing.T) { networkClient, config.NewMockSecretConfigurator(&config.Controller{ EnableGlobalObfuscation: true, - }), v1alpha1.ObfuscateNetworking, + }), + config.NewMockAPIConfigurator(&v1alpha1.GatherConfig{ + DataPolicy: v1alpha1.ObfuscateNetworking, + }), ) assert.NoError(t, err) assert.NotNil(t, anonymizer) diff --git a/pkg/authorizer/clusterauthorizer/clusterauthorizer_test.go b/pkg/authorizer/clusterauthorizer/clusterauthorizer_test.go index be5322572..9b24b3633 100644 --- a/pkg/authorizer/clusterauthorizer/clusterauthorizer_test.go +++ b/pkg/authorizer/clusterauthorizer/clusterauthorizer_test.go @@ -100,7 +100,7 @@ func Test_Proxy(tt *testing.T) { co2 := &config.MockSecretConfigurator{Conf: &config.Controller{HTTPConfig: tc.HTTPConfig}} a := Authorizer{proxyFromEnvironment: nonCachedProxyFromEnvironment(), configurator: co2} p := a.NewSystemOrConfiguredProxy() - req := httptest.NewRequest("GET", tc.RequestURL, http.NoBody) + req := httptest.NewRequest("GET", tc.RequestURL, nil) urlRec, err := p(req) if err != nil { diff --git a/pkg/cmd/start/start.go b/pkg/cmd/start/start.go index afed4a09e..a38c01c0f 100644 --- a/pkg/cmd/start/start.go +++ b/pkg/cmd/start/start.go @@ -25,11 +25,7 @@ import ( "github.com/openshift/insights-operator/pkg/controller" ) -const ( - serviceCACertPath = "/var/run/configmaps/service-ca-bundle/service-ca.crt" - pbContentType = "application/vnd.kubernetes.protobuf" - pbAcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" -) +const serviceCACertPath = "/var/run/configmaps/service-ca-bundle/service-ca.crt" // NewOperator create the commad for running the Insights Operator. func NewOperator() *cobra.Command { @@ -82,29 +78,6 @@ func NewGather() *cobra.Command { return cmd } -func NewGatherAndUpload() *cobra.Command { - operator := &controller.GatherJob{ - Controller: config.Controller{ - ConditionalGathererEndpoint: "https://console.redhat.com/api/gathering/gathering_rules", - StoragePath: "/var/lib/insights-operator", - Interval: 2 * time.Hour, - Endpoint: "https://console.redhat.com/api/ingress/v1/upload", - ReportEndpoint: "https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/reports", - ReportPullingDelay: 60 * time.Second, - ReportMinRetryTime: 10 * time.Second, - ReportPullingTimeout: 30 * time.Minute, - }, - } - cfg := controllercmd.NewControllerCommandConfig("openshift-insights-operator", version.Get(), nil) - cmd := &cobra.Command{ - Use: "gather-and-upload", - Short: "Runs the data gathering as job, uploads the data, waits for Insights analysis report and ends", - Run: runGatherAndUpload(operator, cfg), - } - cmd.Flags().AddFlagSet(cfg.NewCommand().Flags()) - return cmd -} - // Starts a single gather, main responsibility is loading in the necessary configs. 
func runGather(operator *controller.GatherJob, cfg *controllercmd.ControllerCommandConfig) func(cmd *cobra.Command, args []string) { return func(cmd *cobra.Command, args []string) { @@ -142,8 +115,8 @@ func runGather(operator *controller.GatherJob, cfg *controllercmd.ControllerComm } } protoConfig := rest.CopyConfig(clientConfig) - protoConfig.AcceptContentTypes = pbAcceptContentTypes - protoConfig.ContentType = pbContentType + protoConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + protoConfig.ContentType = "application/vnd.kubernetes.protobuf" ctx, cancel := context.WithTimeout(context.Background(), operator.Interval) @@ -246,52 +219,3 @@ func runOperator(operator *controller.Operator, cfg *controllercmd.ControllerCom } } } - -// Starts a single gather, main responsibility is loading in the necessary configs. -func runGatherAndUpload(operator *controller.GatherJob, - cfg *controllercmd.ControllerCommandConfig) func(cmd *cobra.Command, args []string) { - return func(cmd *cobra.Command, args []string) { - if configArg := cmd.Flags().Lookup("config").Value.String(); len(configArg) == 0 { - klog.Exit("error: --config is required") - } - unstructured, _, _, err := cfg.Config() - if err != nil { - klog.Exit(err) - } - cont, err := config.LoadConfig(operator.Controller, unstructured.Object, config.ToDisconnectedController) - if err != nil { - klog.Exit(err) - } - operator.Controller = cont - - var clientConfig *rest.Config - if kubeConfigPath := cmd.Flags().Lookup("kubeconfig").Value.String(); len(kubeConfigPath) > 0 { - kubeConfigBytes, err := os.ReadFile(kubeConfigPath) //nolint: govet - if err != nil { - klog.Exit(err) - } - kubeConfig, err := clientcmd.NewClientConfigFromBytes(kubeConfigBytes) - if err != nil { - klog.Exit(err) - } - clientConfig, err = kubeConfig.ClientConfig() - if err != nil { - klog.Exit(err) - } - } else { - clientConfig, err = rest.InClusterConfig() - if err != nil { - klog.Exit(err) - } - } - protoConfig := rest.CopyConfig(clientConfig) - protoConfig.AcceptContentTypes = pbAcceptContentTypes - protoConfig.ContentType = pbContentType - - err = operator.GatherAndUpload(clientConfig, protoConfig) - if err != nil { - klog.Exit(err) - } - os.Exit(0) - } -} diff --git a/pkg/config/configobserver/insighgtsdatagather_observer.go b/pkg/config/configobserver/insighgtsdatagather_observer.go index 230e10d9f..0c0a57e61 100644 --- a/pkg/config/configobserver/insighgtsdatagather_observer.go +++ b/pkg/config/configobserver/insighgtsdatagather_observer.go @@ -56,7 +56,7 @@ func NewInsightsDataGatherObserver(kubeConfig *rest.Config, return c, nil } -func (i *insightsDataGatherController) sync(ctx context.Context, _ factory.SyncContext) error { +func (i *insightsDataGatherController) sync(ctx context.Context, scx factory.SyncContext) error { insightDataGatherConf, err := i.configV1Alpha1Cli.InsightsDataGathers().Get(ctx, "cluster", metav1.GetOptions{}) if err != nil { return err diff --git a/pkg/controller/gather_commands.go b/pkg/controller/gather_commands.go deleted file mode 100644 index 79f88cb94..000000000 --- a/pkg/controller/gather_commands.go +++ /dev/null @@ -1,258 +0,0 @@ -package controller - -import ( - "context" - "fmt" - "os" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/pkg/version" - "k8s.io/client-go/rest" - "k8s.io/klog/v2" - - insightsv1alpha1 "github.com/openshift/api/insights/v1alpha1" - configv1client "github.com/openshift/client-go/config/clientset/versioned" - 
insightsv1alpha1cli "github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1" - "github.com/openshift/insights-operator/pkg/anonymization" - "github.com/openshift/insights-operator/pkg/authorizer/clusterauthorizer" - "github.com/openshift/insights-operator/pkg/config" - "github.com/openshift/insights-operator/pkg/config/configobserver" - "github.com/openshift/insights-operator/pkg/controller/status" - "github.com/openshift/insights-operator/pkg/gather" - "github.com/openshift/insights-operator/pkg/insights/insightsclient" - "github.com/openshift/insights-operator/pkg/insights/insightsuploader" - "github.com/openshift/insights-operator/pkg/recorder" - "github.com/openshift/insights-operator/pkg/recorder/diskrecorder" -) - -// GatherJob is the type responsible for controlling a non-periodic Gather execution -type GatherJob struct { - config.Controller - InsightsConfigAPIEnabled bool -} - -// Gather runs a single gather and stores the generated archive, without uploading it. -// 1. Creates the necessary configs/clients -// 2. Creates the configobserver to get more configs -// 3. Initiates the recorder -// 4. Executes a Gather -// 5. Flushes the results -func (d *GatherJob) Gather(ctx context.Context, kubeConfig, protoKubeConfig *rest.Config) error { - klog.Infof("Starting insights-operator %s", version.Get().String()) - // these are operator clients - kubeClient, err := kubernetes.NewForConfig(protoKubeConfig) - if err != nil { - return err - } - - gatherProtoKubeConfig, gatherKubeConfig, metricsGatherKubeConfig, alertsGatherKubeConfig := prepareGatherConfigs( - protoKubeConfig, kubeConfig, d.Impersonate, - ) - - // ensure the insight snapshot directory exists - if _, err = os.Stat(d.StoragePath); err != nil && os.IsNotExist(err) { - if err = os.MkdirAll(d.StoragePath, 0777); err != nil { - return fmt.Errorf("can't create --path: %v", err) - } - } - - // configobserver synthesizes all config into the status reporter controller - configObserver := configobserver.New(d.Controller, kubeClient) - - // anonymizer is responsible for anonymizing sensitive data, it can be configured to disable specific anonymization - anonymizer, err := anonymization.NewAnonymizerFromConfig( - ctx, gatherKubeConfig, gatherProtoKubeConfig, protoKubeConfig, configObserver, "") - if err != nil { - return err - } - - // the recorder stores the collected data and we flush at the end. 
- recdriver := diskrecorder.New(d.StoragePath) - rec := recorder.New(recdriver, d.Interval, anonymizer) - defer func() { - if err = rec.Flush(); err != nil { - klog.Error(err) - } - }() - - authorizer := clusterauthorizer.New(configObserver) - - // gatherConfigClient is configClient created from gatherKubeConfig, this name was used because configClient was already taken - // this client is only used in insightsClient, it is created here - // because pkg/insights/insightsclient/request_test.go unit test won't work otherwise - gatherConfigClient, err := configv1client.NewForConfig(gatherKubeConfig) - if err != nil { - return err - } - - insightsClient := insightsclient.New(nil, 0, "default", authorizer, gatherConfigClient) - gatherers := gather.CreateAllGatherers( - gatherKubeConfig, gatherProtoKubeConfig, metricsGatherKubeConfig, alertsGatherKubeConfig, anonymizer, - configObserver, insightsClient, - ) - - allFunctionReports := make(map[string]gather.GathererFunctionReport) - for _, gatherer := range gatherers { - functionReports, err := gather.CollectAndRecordGatherer(ctx, gatherer, rec, nil) - if err != nil { - klog.Errorf("unable to process gatherer %v, error: %v", gatherer.GetName(), err) - } - - for i := range functionReports { - allFunctionReports[functionReports[i].FuncName] = functionReports[i] - } - } - - return gather.RecordArchiveMetadata(mapToArray(allFunctionReports), rec, anonymizer) -} - -// GatherAndUpload runs a single gather and stores the generated archive, uploads it -// and waits for the corresponding Insights analysis report. -// 1. Creates the necessary configs/clients -// 2. Creates the configobserver -// 3. Initiates the recorder -// 4. Executes a Gather -// 5. Flushes the results -// 6. Get the latest archive -// 7. Uploads the archive -// 8. 
Waits for the corresponding Insights analysis download -func (d *GatherJob) GatherAndUpload(kubeConfig, protoKubeConfig *rest.Config) error { // nolint: funlen, gocyclo - klog.Infof("Starting insights-operator %s", version.Get().String()) - // these are operator clients - kubeClient, err := kubernetes.NewForConfig(protoKubeConfig) - if err != nil { - return err - } - - insightClient, err := insightsv1alpha1cli.NewForConfig(kubeConfig) - if err != nil { - return err - } - - gatherProtoKubeConfig, gatherKubeConfig, metricsGatherKubeConfig, alertsGatherKubeConfig := prepareGatherConfigs( - protoKubeConfig, kubeConfig, d.Impersonate, - ) - - // The reason for using longer context is that the upload can fail and then there is the exponential backoff - // See the insightsuploader Upload method - ctx, cancel := context.WithTimeout(context.Background(), d.Interval*4) - defer cancel() - dataGatherCR, err := insightClient.DataGathers().Get(ctx, os.Getenv("DATAGATHER_NAME"), metav1.GetOptions{}) - if err != nil { - klog.Error("failed to get coresponding DataGather custom resource: %v", err) - return err - } - updatedCR := dataGatherCR.DeepCopy() - updatedCR.Status.State = insightsv1alpha1.Running - updatedCR.Status.StartTime = metav1.Now() - - dataGatherCR, err = insightClient.DataGathers().UpdateStatus(ctx, updatedCR, metav1.UpdateOptions{}) - if err != nil { - klog.Error("failed to update coresponding DataGather custom resource: %v", err) - return err - } - - // ensure the insight snapshot directory exists - if _, err = os.Stat(d.StoragePath); err != nil && os.IsNotExist(err) { - if err = os.MkdirAll(d.StoragePath, 0777); err != nil { - return fmt.Errorf("can't create --path: %v", err) - } - } - - // configobserver synthesizes all config into the status reporter controller - configObserver := configobserver.New(d.Controller, kubeClient) - // anonymizer is responsible for anonymizing sensitive data, it can be configured to disable specific anonymization - anonymizer, err := anonymization.NewAnonymizerFromConfig( - ctx, gatherKubeConfig, gatherProtoKubeConfig, protoKubeConfig, configObserver, dataGatherCR.Spec.DataPolicy) - if err != nil { - return err - } - - // the recorder stores the collected data and we flush at the end. 
- recdriver := diskrecorder.New(d.StoragePath) - rec := recorder.New(recdriver, d.Interval, anonymizer) - authorizer := clusterauthorizer.New(configObserver) - - configClient, err := configv1client.NewForConfig(gatherKubeConfig) - if err != nil { - return err - } - insightsClient := insightsclient.New(nil, 0, "default", authorizer, configClient) - - gatherers := gather.CreateAllGatherers( - gatherKubeConfig, gatherProtoKubeConfig, metricsGatherKubeConfig, alertsGatherKubeConfig, anonymizer, - configObserver, insightsClient, - ) - uploader := insightsuploader.New(nil, insightsClient, configObserver, nil, nil, 0) - - allFunctionReports := make(map[string]gather.GathererFunctionReport) - for _, gatherer := range gatherers { - functionReports, err := gather.CollectAndRecordGatherer(ctx, gatherer, rec, dataGatherCR.Spec.Gatherers) // nolint: govet - if err != nil { - klog.Errorf("unable to process gatherer %v, error: %v", gatherer.GetName(), err) - } - - for i := range functionReports { - allFunctionReports[functionReports[i].FuncName] = functionReports[i] - } - } - err = gather.RecordArchiveMetadata(mapToArray(allFunctionReports), rec, anonymizer) - if err != nil { - klog.Error(err) - return err - } - err = rec.Flush() - if err != nil { - klog.Error(err) - return err - } - lastArchive, err := recdriver.LastArchive() - if err != nil { - klog.Error(err) - return err - } - insightsRequestID, err := uploader.Upload(ctx, lastArchive) - if err != nil { - klog.Error(err) - return err - } - klog.Infof("Insights archive successfully uploaded with InsightsRequestID: %s", insightsRequestID) - - dataGatherCR.Status.FinishTime = metav1.Now() - dataGatherCR.Status.State = insightsv1alpha1.Completed - dataGatherCR.Status.InsightsRequestID = insightsRequestID - dataGatherCR.Status.Conditions = []metav1.Condition{ - { - Type: "DataUploaded", - Status: metav1.ConditionTrue, - Reason: "AsExpected", - LastTransitionTime: metav1.Now(), - }, - } - for k := range allFunctionReports { - fr := allFunctionReports[k] - // duration = 0 means the gatherer didn't run - if fr.Duration == 0 { - continue - } - - gs := status.CreateDataGatherGathererStatus(&fr) - dataGatherCR.Status.Gatherers = append(dataGatherCR.Status.Gatherers, gs) - } - _, err = insightClient.DataGathers().UpdateStatus(ctx, dataGatherCR, metav1.UpdateOptions{}) - if err != nil { - klog.Error(err) - return err - } - // TODO use the InsightsRequestID to query the new aggregator API - return nil -} - -func mapToArray(m map[string]gather.GathererFunctionReport) []gather.GathererFunctionReport { - a := make([]gather.GathererFunctionReport, 0, len(m)) - for _, v := range m { - a = append(a, v) - } - return a -} diff --git a/pkg/controller/gather_job.go b/pkg/controller/gather_job.go new file mode 100644 index 000000000..a926badbd --- /dev/null +++ b/pkg/controller/gather_job.go @@ -0,0 +1,127 @@ +package controller + +import ( + "context" + "fmt" + "os" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/pkg/version" + "k8s.io/client-go/rest" + "k8s.io/klog/v2" + + "github.com/openshift/api/config/v1alpha1" + configv1client "github.com/openshift/client-go/config/clientset/versioned" + "github.com/openshift/insights-operator/pkg/anonymization" + "github.com/openshift/insights-operator/pkg/authorizer/clusterauthorizer" + "github.com/openshift/insights-operator/pkg/config" + "github.com/openshift/insights-operator/pkg/config/configobserver" + "github.com/openshift/insights-operator/pkg/gather" + 
"github.com/openshift/insights-operator/pkg/insights/insightsclient" + "github.com/openshift/insights-operator/pkg/recorder" + "github.com/openshift/insights-operator/pkg/recorder/diskrecorder" +) + +// GatherJob is the type responsible for controlling a non-periodic Gather execution +type GatherJob struct { + config.Controller + InsightsConfigAPIEnabled bool +} + +// Gather runs a single gather and stores the generated archive, without uploading it. +// 1. Creates the necessary configs/clients +// 2. Creates the configobserver to get more configs +// 3. Initiates the recorder +// 4. Executes a Gather +// 5. Flushes the results +func (d *GatherJob) Gather(ctx context.Context, kubeConfig, protoKubeConfig *rest.Config) error { + klog.Infof("Starting insights-operator %s", version.Get().String()) + // these are operator clients + kubeClient, err := kubernetes.NewForConfig(protoKubeConfig) + if err != nil { + return err + } + + configClient, err := configv1client.NewForConfig(kubeConfig) + if err != nil { + return err + } + + gatherProtoKubeConfig, gatherKubeConfig, metricsGatherKubeConfig, alertsGatherKubeConfig := prepareGatherConfigs( + protoKubeConfig, kubeConfig, d.Impersonate, + ) + + var gatherConfig v1alpha1.GatherConfig + if d.InsightsConfigAPIEnabled { + insightsDataGather, err := configClient.ConfigV1alpha1().InsightsDataGathers().Get(ctx, "cluster", metav1.GetOptions{}) //nolint: govet + if err != nil { + return err + } + gatherConfig = insightsDataGather.Spec.GatherConfig + } + + // ensure the insight snapshot directory exists + if _, err = os.Stat(d.StoragePath); err != nil && os.IsNotExist(err) { + if err = os.MkdirAll(d.StoragePath, 0777); err != nil { + return fmt.Errorf("can't create --path: %v", err) + } + } + + // configobserver synthesizes all config into the status reporter controller + configObserver := configobserver.New(d.Controller, kubeClient) + + // anonymizer is responsible for anonymizing sensitive data, it can be configured to disable specific anonymization + anonymizer, err := anonymization.NewAnonymizerFromConfig( + ctx, gatherKubeConfig, gatherProtoKubeConfig, protoKubeConfig, configObserver, nil) + if err != nil { + return err + } + + // the recorder stores the collected data and we flush at the end. 
+ recdriver := diskrecorder.New(d.StoragePath) + rec := recorder.New(recdriver, d.Interval, anonymizer) + defer func() { + if err = rec.Flush(); err != nil { + klog.Error(err) + } + }() + + authorizer := clusterauthorizer.New(configObserver) + + // gatherConfigClient is configClient created from gatherKubeConfig, this name was used because configClient was already taken + // this client is only used in insightsClient, it is created here + // because pkg/insights/insightsclient/request_test.go unit test won't work otherwise + gatherConfigClient, err := configv1client.NewForConfig(gatherKubeConfig) + if err != nil { + return err + } + + insightsClient := insightsclient.New(nil, 0, "default", authorizer, gatherConfigClient) + gatherers := gather.CreateAllGatherers( + gatherKubeConfig, gatherProtoKubeConfig, metricsGatherKubeConfig, alertsGatherKubeConfig, anonymizer, + configObserver, insightsClient, + ) + + allFunctionReports := make(map[string]gather.GathererFunctionReport) + for _, gatherer := range gatherers { + functionReports, err := gather.CollectAndRecordGatherer(ctx, gatherer, rec, &gatherConfig) + if err != nil { + klog.Errorf("unable to process gatherer %v, error: %v", gatherer.GetName(), err) + } + + for i := range functionReports { + allFunctionReports[functionReports[i].FuncName] = functionReports[i] + } + } + + return gather.RecordArchiveMetadata(mapToArray(allFunctionReports), rec, anonymizer) +} + +func mapToArray(m map[string]gather.GathererFunctionReport) []gather.GathererFunctionReport { + a := make([]gather.GathererFunctionReport, 0, len(m)) + for _, v := range m { + a = append(a, v) + } + return a +} diff --git a/pkg/controller/operator.go b/pkg/controller/operator.go index 3b70fe19d..d782c524a 100644 --- a/pkg/controller/operator.go +++ b/pkg/controller/operator.go @@ -9,7 +9,6 @@ import ( v1 "github.com/openshift/api/config/v1" configv1client "github.com/openshift/client-go/config/clientset/versioned" configv1informers "github.com/openshift/client-go/config/informers/externalversions" - insightsv1alpha1cli "github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1" operatorv1client "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1" "github.com/openshift/library-go/pkg/controller/controllercmd" "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" @@ -74,11 +73,6 @@ func (s *Operator) Run(ctx context.Context, controller *controllercmd.Controller return err } - insightClient, err := insightsv1alpha1cli.NewForConfig(controller.KubeConfig) - if err != nil { - return err - } - gatherProtoKubeConfig, gatherKubeConfig, metricsGatherKubeConfig, alertsGatherKubeConfig := prepareGatherConfigs( controller.ProtoKubeConfig, controller.KubeConfig, s.Impersonate, ) @@ -145,30 +139,23 @@ func (s *Operator) Run(ctx context.Context, controller *controllercmd.Controller // the status controller initializes the cluster operator object and retrieves // the last sync time, if any was set - statusReporter := status.NewController(configClient.ConfigV1(), secretConfigObserver, - insightsDataGatherObserver, os.Getenv("POD_NAMESPACE")) - - var anonymizer *anonymization.Anonymizer - var recdriver *diskrecorder.DiskRecorder - var rec *recorder.Recorder - // if techPreview is enabled we switch to separate job and we don't need anything from this - if !insightsConfigAPIEnabled { - // anonymizer is responsible for anonymizing sensitive data, it can be configured to disable specific anonymization - anonymizer, err = 
anonymization.NewAnonymizerFromConfig(ctx, gatherKubeConfig, - gatherProtoKubeConfig, controller.ProtoKubeConfig, secretConfigObserver, "") - if err != nil { - // in case of an error anonymizer will be nil and anonymization will be just skipped - klog.Errorf(anonymization.UnableToCreateAnonymizerErrorMessage, err) - return err - } + statusReporter := status.NewController(configClient.ConfigV1(), secretConfigObserver, insightsDataGatherObserver, os.Getenv("POD_NAMESPACE")) - // the recorder periodically flushes any recorded data to disk as tar.gz files - // in s.StoragePath, and also prunes files above a certain age - recdriver = diskrecorder.New(s.StoragePath) - rec = recorder.New(recdriver, s.Interval, anonymizer) - go rec.PeriodicallyPrune(ctx, statusReporter) + // anonymizer is responsible for anonymizing sensitive data, it can be configured to disable specific anonymization + anonymizer, err := anonymization.NewAnonymizerFromConfig(ctx, gatherKubeConfig, + gatherProtoKubeConfig, controller.ProtoKubeConfig, secretConfigObserver, insightsDataGatherObserver) + if err != nil { + // in case of an error anonymizer will be nil and anonymization will be just skipped + klog.Errorf(anonymization.UnableToCreateAnonymizerErrorMessage, err) + return err } + // the recorder periodically flushes any recorded data to disk as tar.gz files + // in s.StoragePath, and also prunes files above a certain age + recdriver := diskrecorder.New(s.StoragePath) + rec := recorder.New(recdriver, s.Interval, anonymizer) + go rec.PeriodicallyPrune(ctx, statusReporter) + authorizer := clusterauthorizer.New(secretConfigObserver) // gatherConfigClient is configClient created from gatherKubeConfig, this name was used because configClient was already taken @@ -181,23 +168,14 @@ func (s *Operator) Run(ctx context.Context, controller *controllercmd.Controller insightsClient := insightsclient.New(nil, 0, "default", authorizer, gatherConfigClient) - var periodicGather *periodic.Controller // the gatherers are periodically called to collect the data from the cluster // and provide the results for the recorder gatherers := gather.CreateAllGatherers( gatherKubeConfig, gatherProtoKubeConfig, metricsGatherKubeConfig, alertsGatherKubeConfig, anonymizer, secretConfigObserver, insightsClient, ) - if !insightsConfigAPIEnabled { - periodicGather = periodic.New(secretConfigObserver, rec, gatherers, anonymizer, - operatorClient.InsightsOperators(), kubeClient) - statusReporter.AddSources(periodicGather.Sources()...) - } else { - reportRetriever := insightsreport.NewWithTechPreview(insightsClient, secretConfigObserver, operatorClient.InsightsOperators()) - periodicGather = periodic.NewWithTechPreview(reportRetriever, secretConfigObserver, - insightsDataGatherObserver, gatherers, kubeClient, insightClient, operatorClient.InsightsOperators()) - go periodicGather.PeriodicPrune(ctx) - } + periodicGather := periodic.New(secretConfigObserver, rec, gatherers, anonymizer, operatorClient.InsightsOperators(), insightsDataGatherObserver) + statusReporter.AddSources(periodicGather.Sources()...) // check we can read IO container status and we are not in crash loop initialCheckTimeout := s.Controller.Interval / 24 @@ -208,28 +186,24 @@ func (s *Operator) Run(ctx context.Context, controller *controllercmd.Controller initialDelay = wait.Jitter(baseInitialDelay, 0.5) klog.Infof("Unable to check insights-operator pod status. 
Setting initial delay to %s", initialDelay) } - go periodicGather.Run(ctx.Done(), initialDelay, insightsConfigAPIEnabled) - - if !insightsConfigAPIEnabled { - // upload results to the provided client - if no client is configured reporting - // is permanently disabled, but if a client does exist the server may still disable reporting - uploader := insightsuploader.New(recdriver, insightsClient, secretConfigObserver, - insightsDataGatherObserver, statusReporter, initialDelay) - statusReporter.AddSources(uploader) - - // start uploading status, so that we - // know any previous last reported time - go uploader.Run(ctx) - - reportGatherer := insightsreport.New(insightsClient, secretConfigObserver, uploader, operatorClient.InsightsOperators()) - statusReporter.AddSources(reportGatherer) - go reportGatherer.Run(ctx) - } + go periodicGather.Run(ctx.Done(), initialDelay) + + // upload results to the provided client - if no client is configured reporting + // is permanently disabled, but if a client does exist the server may still disable reporting + uploader := insightsuploader.New(recdriver, insightsClient, secretConfigObserver, insightsDataGatherObserver, statusReporter, initialDelay) + statusReporter.AddSources(uploader) // start reporting status now that all controller loops are added as sources if err = statusReporter.Start(ctx); err != nil { return fmt.Errorf("unable to set initial cluster status: %v", err) } + // start uploading status, so that we + // know any previous last reported time + go uploader.Run(ctx) + + reportGatherer := insightsreport.New(insightsClient, secretConfigObserver, uploader, operatorClient.InsightsOperators()) + statusReporter.AddSources(reportGatherer) + go reportGatherer.Run(ctx) scaController := initiateSCAController(ctx, kubeClient, secretConfigObserver, insightsClient) if scaController != nil { diff --git a/pkg/controller/periodic/job.go b/pkg/controller/periodic/job.go deleted file mode 100644 index d3d72f293..000000000 --- a/pkg/controller/periodic/job.go +++ /dev/null @@ -1,125 +0,0 @@ -package periodic - -import ( - "context" - "fmt" - "time" - - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" -) - -// JobController type responsible for -// creating a new gathering jobs -type JobController struct { - kubeClient kubernetes.Interface -} - -func NewJobController(kubeClient kubernetes.Interface) *JobController { - return &JobController{ - kubeClient: kubeClient, - } -} - -// CreateGathererJob creates a new Kubernetes Job with provided image, volume mount path used for storing data archives and name -// derived from the provided data gather name -func (j *JobController) CreateGathererJob(ctx context.Context, dataGatherName, image, archiveVolumeMountPath string) (*batchv1.Job, error) { - gj := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: dataGatherName, - Namespace: insightsNamespace, - }, - Spec: batchv1.JobSpec{ - // backoff limit is 0 - we dont' want to restart the gathering immediately in case of failure - BackoffLimit: new(int32), - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - ServiceAccountName: "operator", - SecurityContext: &corev1.PodSecurityContext{ - RunAsNonRoot: &trueB, - SeccompProfile: &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - }, - }, - 
Volumes: []corev1.Volume{ - { - Name: "archives-path", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }, - { - Name: serviceCABundle, - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: serviceCABundle, - }, - Optional: &trueB, - }, - }, - }, - }, - Containers: []corev1.Container{ - { - Name: "insights-gathering", - Image: image, - Args: []string{"gather-and-upload", "-v=4", "--config=/etc/insights-operator/server.yaml"}, - Env: []corev1.EnvVar{ - { - Name: "DATAGATHER_NAME", - Value: dataGatherName, - }, - }, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("10m"), - corev1.ResourceMemory: resource.MustParse("70Mi"), - }, - }, - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: falseB, - Capabilities: &corev1.Capabilities{Drop: []corev1.Capability{"ALL"}}, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "archives-path", - MountPath: archiveVolumeMountPath, - }, - { - Name: serviceCABundle, - MountPath: serviceCABundlePath, - }, - }, - }, - }, - }, - }, - }, - } - - return j.kubeClient.BatchV1().Jobs(insightsNamespace).Create(ctx, gj, metav1.CreateOptions{}) -} - -// WaitForJobCompletion polls the Kubernetes API every 20 seconds and checks if the job finished. -func (j *JobController) WaitForJobCompletion(ctx context.Context, job *batchv1.Job) error { - return wait.PollUntilContextCancel(ctx, 20*time.Second, true, func(ctx context.Context) (done bool, err error) { - j, err := j.kubeClient.BatchV1().Jobs(insightsNamespace).Get(ctx, job.Name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return false, err - } - if j.Status.Succeeded > 0 { - return true, nil - } - if j.Status.Failed > 0 { - return true, fmt.Errorf("job %s failed", job.Name) - } - return false, nil - }) -} diff --git a/pkg/controller/periodic/job_test.go b/pkg/controller/periodic/job_test.go deleted file mode 100644 index 9f72a0f9e..000000000 --- a/pkg/controller/periodic/job_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package periodic - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - kubefake "k8s.io/client-go/kubernetes/fake" -) - -func TestCreateGathererJob(t *testing.T) { - tests := []struct { - name string - dataGatherName string - imageName string - volumeMountPath string - }{ - { - name: "Basic gathering job creation", - dataGatherName: "custom-gather-xyz", - imageName: "test.io/test/insights-image", - volumeMountPath: "/var/lib/test-io/path", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cs := kubefake.NewSimpleClientset() - jc := NewJobController(cs) - createdJob, err := jc.CreateGathererJob(context.Background(), tt.dataGatherName, tt.imageName, tt.volumeMountPath) - assert.NoError(t, err) - assert.Equal(t, tt.dataGatherName, createdJob.Name) - assert.Len(t, createdJob.Spec.Template.Spec.Containers, 1) - assert.Equal(t, tt.imageName, createdJob.Spec.Template.Spec.Containers[0].Image) - // we mount to volumes - assert.Len(t, createdJob.Spec.Template.Spec.Containers[0].VolumeMounts, 2) - assert.Equal(t, tt.volumeMountPath, createdJob.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath) - }) - } -} diff --git a/pkg/controller/periodic/periodic.go b/pkg/controller/periodic/periodic.go index b06b0abb1..0b95c3a55 100644 --- a/pkg/controller/periodic/periodic.go +++ b/pkg/controller/periodic/periodic.go @@ -4,36 +4,36 @@ import ( 
"context" "fmt" "sort" + "strings" "time" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" - configv1alpha1 "github.com/openshift/api/config/v1alpha1" - insightsv1alpha1 "github.com/openshift/api/insights/v1alpha1" v1 "github.com/openshift/api/operator/v1" - insightsv1alpha1cli "github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1" operatorv1client "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1" "github.com/openshift/insights-operator/pkg/anonymization" "github.com/openshift/insights-operator/pkg/config/configobserver" - "github.com/openshift/insights-operator/pkg/controller/status" "github.com/openshift/insights-operator/pkg/controllerstatus" "github.com/openshift/insights-operator/pkg/gather" "github.com/openshift/insights-operator/pkg/gatherers" - "github.com/openshift/insights-operator/pkg/insights/insightsreport" "github.com/openshift/insights-operator/pkg/recorder" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -var ( - serviceCABundle = "service-ca-bundle" - serviceCABundlePath = "/var/run/configmaps/service-ca-bundle" - insightsNamespace = "openshift-insights" - falseB = new(bool) - trueB = true - deletePropagationBackground = metav1.DeletePropagationBackground +const ( + DataGatheredCondition = "DataGathered" + // NoDataGathered is a reason when there is no data gathered - e.g the resource is not in a cluster + NoDataGatheredReason = "NoData" + // Error is a reason when there is some error and no data gathered + GatherErrorReason = "GatherError" + // Panic is a reason when there is some error and no data gathered + GatherPanicReason = "GatherPanic" + // GatheredOK is a reason when data is gathered as expected + GatheredOKReason = "GatheredOK" + // GatheredWithError is a reason when data is gathered partially or with another error message + GatheredWithErrorReason = "GatheredWithError" ) // Controller periodically runs gatherers, records their results to the recorder @@ -46,37 +46,6 @@ type Controller struct { statuses map[string]controllerstatus.StatusController anonymizer *anonymization.Anonymizer insightsOperatorCLI operatorv1client.InsightsOperatorInterface - dataGatherClient insightsv1alpha1cli.InsightsV1alpha1Interface - kubeClient kubernetes.Interface - reportRetriever *insightsreport.Controller - image string - jobController *JobController - pruneInterval time.Duration -} - -func NewWithTechPreview( - reportRetriever *insightsreport.Controller, - secretConfigurator configobserver.Configurator, - apiConfigurator configobserver.InsightsDataGatherObserver, - listGatherers []gatherers.Interface, - kubeClient kubernetes.Interface, - dataGatherClient insightsv1alpha1cli.InsightsV1alpha1Interface, - insightsOperatorCLI operatorv1client.InsightsOperatorInterface, -) *Controller { - statuses := make(map[string]controllerstatus.StatusController) - jobController := NewJobController(kubeClient) - return &Controller{ - reportRetriever: reportRetriever, - secretConfigurator: secretConfigurator, - apiConfigurator: apiConfigurator, - gatherers: listGatherers, - statuses: statuses, - kubeClient: kubeClient, - dataGatherClient: dataGatherClient, - jobController: jobController, - insightsOperatorCLI: insightsOperatorCLI, - pruneInterval: 1 * time.Hour, - } } // New creates a new instance of Controller which periodically invokes the gatherers @@ -87,7 +56,7 @@ func New( listGatherers []gatherers.Interface, anonymizer *anonymization.Anonymizer, 
insightsOperatorCLI operatorv1client.InsightsOperatorInterface, - kubeClient *kubernetes.Clientset, + apiConfigurator configobserver.InsightsDataGatherObserver, ) *Controller { statuses := make(map[string]controllerstatus.StatusController) @@ -98,12 +67,12 @@ func New( return &Controller{ secretConfigurator: secretConfigurator, + apiConfigurator: apiConfigurator, recorder: rec, gatherers: listGatherers, statuses: statuses, anonymizer: anonymizer, insightsOperatorCLI: insightsOperatorCLI, - kubeClient: kubeClient, } } @@ -120,7 +89,7 @@ func (c *Controller) Sources() []controllerstatus.StatusController { return sources } -func (c *Controller) Run(stopCh <-chan struct{}, initialDelay time.Duration, techPreview bool) { +func (c *Controller) Run(stopCh <-chan struct{}, initialDelay time.Duration) { defer utilruntime.HandleCrash() defer klog.Info("Shutting down") @@ -130,21 +99,13 @@ func (c *Controller) Run(stopCh <-chan struct{}, initialDelay time.Duration, tec case <-stopCh: return case <-time.After(initialDelay): - if techPreview { - c.GatherJob() - } else { - c.Gather() - } - } - } else { - if techPreview { - c.GatherJob() - } else { c.Gather() } + } else { + c.Gather() } - go wait.Until(func() { c.periodicTrigger(stopCh, techPreview) }, time.Second, stopCh) + go wait.Until(func() { c.periodicTrigger(stopCh) }, time.Second, stopCh) <-stopCh } @@ -176,9 +137,6 @@ func (c *Controller) Gather() { } } - ctx, cancel := context.WithTimeout(context.Background(), c.secretConfigurator.Config().Interval) - defer cancel() - allFunctionReports := make(map[string]gather.GathererFunctionReport) gatherTime := metav1.Now() for _, gatherer := range gatherersToProcess { @@ -186,8 +144,17 @@ func (c *Controller) Gather() { name := gatherer.GetName() start := time.Now() + ctx, cancel := context.WithTimeout(context.Background(), c.secretConfigurator.Config().Interval/2) + defer cancel() + klog.V(4).Infof("Running %s gatherer", gatherer.GetName()) - functionReports, err := gather.CollectAndRecordGatherer(ctx, gatherer, c.recorder, nil) + var functionReports []gather.GathererFunctionReport + var err error + if c.apiConfigurator != nil { + functionReports, err = gather.CollectAndRecordGatherer(ctx, gatherer, c.recorder, c.apiConfigurator.GatherConfig()) + } else { + functionReports, err = gather.CollectAndRecordGatherer(ctx, gatherer, c.recorder, nil) + } for i := range functionReports { allFunctionReports[functionReports[i].FuncName] = functionReports[i] } @@ -205,7 +172,7 @@ func (c *Controller) Gather() { }) }() } - err := c.updateOperatorStatusCR(ctx, allFunctionReports, gatherTime) + err := c.updateOperatorStatusCR(allFunctionReports, gatherTime) if err != nil { klog.Errorf("failed to update the Insights Operator CR status: %v", err) } @@ -217,7 +184,7 @@ func (c *Controller) Gather() { // Periodically starts the gathering. // If there is an initialDelay set then it waits that much for the first gather to happen. 
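// A minimal sketch of the per-gatherer timeout pattern introduced in Gather()
// above: each gatherer runs under its own context limited to half of the
// configured gathering interval, and the API-provided gather config is only
// consulted when the observer is non-nil. The Gatherer interface and function
// names below are simplified stand-ins, not the operator's real types.
package sketch

import (
	"context"
	"time"
)

type Gatherer interface {
	GetName() string
	Gather(ctx context.Context, disabled []string) error
}

func runGatherers(gatherers []Gatherer, interval time.Duration, disabled func() []string) {
	for _, g := range gatherers {
		// Half of the interval bounds a single gatherer so one slow gatherer
		// cannot consume the whole gathering window.
		ctx, cancel := context.WithTimeout(context.Background(), interval/2)

		var disabledList []string
		if disabled != nil {
			// Only consult the API-provided config when an observer exists.
			disabledList = disabled()
		}
		_ = g.Gather(ctx, disabledList)
		cancel()
	}
}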
-func (c *Controller) periodicTrigger(stopCh <-chan struct{}, techPreview bool) { +func (c *Controller) periodicTrigger(stopCh <-chan struct{}) { configCh, closeFn := c.secretConfigurator.ConfigChanged() defer closeFn() @@ -237,94 +204,15 @@ func (c *Controller) periodicTrigger(stopCh <-chan struct{}, techPreview bool) { klog.Infof("Gathering cluster info every %s", interval) case <-time.After(interval): - if techPreview { - c.GatherJob() - } else { - c.Gather() - } - } - } -} - -func (c *Controller) GatherJob() { - if c.isGatheringDisabled() { - klog.V(3).Info("Gather is disabled by configuration.") - return - } - ctx, cancel := context.WithTimeout(context.Background(), c.secretConfigurator.Config().Interval*4) - defer cancel() - - if c.image == "" { - image, err := c.getInsightsImage(ctx) - if err != nil { - klog.Errorf("Can't get operator image. Gathering will not run: %v", err) - return - } - c.image = image - } - - // create a new datagather.insights.openshift.io custom resource - disabledGatherers, dp := c.createDataGatherAttributeValues() - dataGatherCR, err := c.createNewDataGatherCR(ctx, disabledGatherers, dp) - if err != nil { - klog.Errorf("Failed to create a new DataGather resource: %v", err) - return - } - - // create a new periodic gathering job - gj, err := c.jobController.CreateGathererJob(ctx, dataGatherCR.Name, c.image, c.secretConfigurator.Config().StoragePath) - if err != nil { - klog.Errorf("Failed to create a new job: %v", err) - return - } - - klog.Infof("Created new gathering job %v", gj.Name) - err = c.jobController.WaitForJobCompletion(ctx, gj) - if err != nil { - if err == context.DeadlineExceeded { - klog.Errorf("Failed to read job status: %v", err) - return + c.Gather() } - klog.Error(err) - } - klog.Infof("Job completed %s", gj.Name) - c.reportRetriever.RetrieveReport() - _, err = c.copyDataGatherStatusToOperatorStatus(ctx, dataGatherCR.Name) - if err != nil { - klog.Errorf("Failed to copy the last DataGather status to \"cluster\" operator status: %v", err) - return - } - klog.Info("Operator status in \"insightsoperator.operator.openshift.io\" successfully updated") -} - -// copyDataGatherStatusToOperatorStatus gets the "cluster" "insightsoperator.operator.openshift.io" resource -// and updates its status with values from the provided "dgName" "datagather.insights.openshift.io" resource. -func (c *Controller) copyDataGatherStatusToOperatorStatus(ctx context.Context, dgName string) (*v1.InsightsOperator, error) { - operator, err := c.insightsOperatorCLI.Get(ctx, "cluster", metav1.GetOptions{}) - if err != nil { - return nil, err } - statusToUpdate := operator.Status.DeepCopy() - - dataGather, err := c.dataGatherClient.DataGathers().Get(ctx, dgName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - statusToUpdate.GatherStatus = status.DataGatherStatusToOperatorGatherStatus(&dataGather.Status) - operator.Status = *statusToUpdate - - _, err = c.insightsOperatorCLI.UpdateStatus(ctx, operator, metav1.UpdateOptions{}) - if err != nil { - return nil, err - } - return operator, nil } // updateOperatorStatusCR gets the 'cluster' insightsoperators.operator.openshift.io resource and updates its status with the last // gathering details. 
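// A small sketch of the loop shape used by periodicTrigger above: wait for a
// stop signal, a configuration change (which re-reads the interval), or the
// interval timer, and run one gathering per tick. The channel and callback
// names are illustrative only.
package sketch

import "time"

func triggerLoop(stopCh <-chan struct{}, configCh <-chan struct{}, readInterval func() time.Duration, gatherOnce func()) {
	interval := readInterval()
	for {
		select {
		case <-stopCh:
			return
		case <-configCh:
			// Pick up a possibly changed interval before the next tick.
			interval = readInterval()
		case <-time.After(interval):
			gatherOnce()
		}
	}
}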
-func (c *Controller) updateOperatorStatusCR(ctx context.Context, allFunctionReports map[string]gather.GathererFunctionReport, - gatherTime metav1.Time) error { - insightsOperatorCR, err := c.insightsOperatorCLI.Get(ctx, "cluster", metav1.GetOptions{}) +func (c *Controller) updateOperatorStatusCR(allFunctionReports map[string]gather.GathererFunctionReport, gatherTime metav1.Time) error { + insightsOperatorCR, err := c.insightsOperatorCLI.Get(context.Background(), "cluster", metav1.GetOptions{}) if err != nil { return err } @@ -344,21 +232,17 @@ func (c *Controller) updateOperatorStatusCR(ctx context.Context, allFunctionRepo continue } - gs := status.CreateOperatorGathererStatus(&fr) + gs := createGathererStatus(&fr) updatedOperatorCR.Status.GatherStatus.Gatherers = append(updatedOperatorCR.Status.GatherStatus.Gatherers, gs) } - _, err = c.insightsOperatorCLI.UpdateStatus(ctx, updatedOperatorCR, metav1.UpdateOptions{}) + _, err = c.insightsOperatorCLI.UpdateStatus(context.Background(), updatedOperatorCR, metav1.UpdateOptions{}) if err != nil { return err } return nil } -// isGatheringDisabled checks and returns whether the data gathering -// is disabled or not. There are two options to disable it: -// - removing the corresponding token from pull-secret (the first and original option) -// - configure it in the "insightsdatagather.config.openshift.io" CR func (c *Controller) isGatheringDisabled() bool { // old way of disabling data gathering by removing // the "cloud.openshift.com" token from the pull-secret @@ -374,123 +258,48 @@ func (c *Controller) isGatheringDisabled() bool { return false } -// getInsightsImage reads "insights-operator" deployment and gets the image from the first container -func (c *Controller) getInsightsImage(ctx context.Context) (string, error) { - insightsDeployment, err := c.kubeClient.AppsV1().Deployments(insightsNamespace). 
- Get(ctx, "insights-operator", metav1.GetOptions{}) - if err != nil { - return "", err +func createGathererStatus(gfr *gather.GathererFunctionReport) v1.GathererStatus { + gs := v1.GathererStatus{ + Name: gfr.FuncName, + LastGatherDuration: metav1.Duration{ + // v.Duration is in milliseconds and we need nanoseconds + Duration: time.Duration(gfr.Duration * 1000000), + }, } - containers := insightsDeployment.Spec.Template.Spec.Containers - if len(containers) == 0 { - return "", fmt.Errorf("no container defined in the deployment") + con := metav1.Condition{ + Type: DataGatheredCondition, + LastTransitionTime: metav1.Now(), + Status: metav1.ConditionFalse, + Reason: NoDataGatheredReason, } - return containers[0].Image, nil -} -// PeriodicPrune runs periodically and deletes jobs (including the related pods) -// and "datagather.insights.openshift.io" resources older than 24 hours -func (c *Controller) PeriodicPrune(ctx context.Context) { - klog.Infof("Pruning old jobs every %s", c.pruneInterval) - for { - select { - case <-ctx.Done(): - return - case <-time.After(c.pruneInterval): - klog.Info("Pruning the jobs and datagather resources") - // prune old jobs - jobs, err := c.kubeClient.BatchV1().Jobs(insightsNamespace).List(ctx, metav1.ListOptions{}) - if err != nil { - klog.Error(err) - } - for i := range jobs.Items { - job := jobs.Items[i] - // TODO the time duration should be configurable - if time.Since(job.CreationTimestamp.Time) > 24*time.Hour { - err = c.kubeClient.BatchV1().Jobs(insightsNamespace).Delete(ctx, job.Name, metav1.DeleteOptions{ - PropagationPolicy: &deletePropagationBackground, - }) - if err != nil { - klog.Errorf("Failed to delete job %s: %v", job.Name, err) - continue - } - klog.Infof("Job %s successfully removed", job.Name) - } - } - // prune old DataGather custom resources - dataGatherCRs, err := c.dataGatherClient.DataGathers().List(ctx, metav1.ListOptions{}) - if err != nil { - klog.Error(err) - } - for i := range dataGatherCRs.Items { - dataGatherCR := dataGatherCRs.Items[i] - if time.Since(dataGatherCR.CreationTimestamp.Time) > 24*time.Hour { - err = c.dataGatherClient.DataGathers().Delete(ctx, dataGatherCR.Name, metav1.DeleteOptions{}) - if err != nil { - klog.Errorf("Failed to delete DataGather custom resources %s: %v", dataGatherCR.Name, err) - continue - } - klog.Infof("DataGather %s resource successfully removed", dataGatherCR.Name) - } - } - } + if gfr.Panic != nil { + con.Reason = GatherPanicReason + con.Message = gfr.Panic.(string) } -} -// createNewDataGatherCR creates a new "datagather.insights.openshift.io" custom resource -// with generate name prefix "periodic-gathering-". 
Returns the name of the newly created -// resource -func (c *Controller) createNewDataGatherCR(ctx context.Context, disabledGatherers []string, - dataPolicy insightsv1alpha1.DataPolicy) (*insightsv1alpha1.DataGather, error) { - dataGatherCR := insightsv1alpha1.DataGather{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "periodic-gathering-", - }, - Spec: insightsv1alpha1.DataGatherSpec{ - DataPolicy: dataPolicy, - }, - } - for _, g := range disabledGatherers { - dataGatherCR.Spec.Gatherers = append(dataGatherCR.Spec.Gatherers, insightsv1alpha1.GathererConfig{ - Name: g, - State: insightsv1alpha1.Disabled, - }) - } - dataGather, err := c.dataGatherClient.DataGathers().Create(ctx, &dataGatherCR, metav1.CreateOptions{}) - if err != nil { - return nil, err - } - klog.Infof("Created a new %s DataGather custom resource", dataGather.Name) - return dataGather, nil -} + if gfr.RecordsCount > 0 { + con.Status = metav1.ConditionTrue + con.Reason = GatheredOKReason + con.Message = fmt.Sprintf("Created %d records in the archive.", gfr.RecordsCount) + + if len(gfr.Errors) > 0 { + con.Reason = GatheredWithErrorReason + con.Message = fmt.Sprintf("%s Error: %s", con.Message, strings.Join(gfr.Errors, ",")) + } -// createDataGatherAttributeValues reads the current "insightsdatagather.config.openshift.io" configuration -// and checks custom period gatherers and returns list of disabled gatherers based on this two values -// and also data policy set in the "insightsdatagather.config.openshift.io" -func (c *Controller) createDataGatherAttributeValues() ([]string, insightsv1alpha1.DataPolicy) { - gatherConfig := c.apiConfigurator.GatherConfig() - - var dp insightsv1alpha1.DataPolicy - switch gatherConfig.DataPolicy { - case "": - dp = insightsv1alpha1.NoPolicy - case configv1alpha1.NoPolicy: - dp = insightsv1alpha1.NoPolicy - case configv1alpha1.ObfuscateNetworking: - dp = insightsv1alpha1.ObfuscateNetworking + gs.Conditions = append(gs.Conditions, con) + return gs } - disabledGatherers := gatherConfig.DisabledGatherers - for _, gatherer := range c.gatherers { - if g, ok := gatherer.(gatherers.CustomPeriodGatherer); ok { - if !g.ShouldBeProcessedNow() { - disabledGatherers = append(disabledGatherers, g.GetName()) - } else { - g.UpdateLastProcessingTime() - } - } + if len(gfr.Errors) > 0 { + con.Reason = GatherErrorReason + con.Message = strings.Join(gfr.Errors, ",") } - return disabledGatherers, dp + + gs.Conditions = append(gs.Conditions, con) + + return gs } func mapToArray(m map[string]gather.GathererFunctionReport) []gather.GathererFunctionReport { diff --git a/pkg/controller/periodic/periodic_test.go b/pkg/controller/periodic/periodic_test.go index ac8167d7b..9257e3cf9 100644 --- a/pkg/controller/periodic/periodic_test.go +++ b/pkg/controller/periodic/periodic_test.go @@ -1,7 +1,6 @@ package periodic import ( - "context" "encoding/json" "fmt" "testing" @@ -9,23 +8,15 @@ import ( "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - configv1alpha1 "github.com/openshift/api/config/v1alpha1" - "github.com/openshift/api/insights/v1alpha1" - operatorv1 "github.com/openshift/api/operator/v1" - insightsFakeCli "github.com/openshift/client-go/insights/clientset/versioned/fake" + "github.com/openshift/api/config/v1alpha1" + v1 "github.com/openshift/api/operator/v1" fakeOperatorCli "github.com/openshift/client-go/operator/clientset/versioned/fake" "github.com/openshift/insights-operator/pkg/anonymization" 
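// A short sketch of the duration handling in createGathererStatus above: the
// gatherer function report stores Duration in milliseconds, while
// metav1.Duration wraps time.Duration (nanoseconds), hence the *1000000 factor.
// The numbers in Example match the "Data gathered OK" test case below
// (115000 ms -> 115000000000 ns); the helper name is illustrative.
package sketch

import (
	"fmt"
	"time"
)

func millisToDuration(ms int64) time.Duration {
	// Equivalent to time.Duration(ms * 1000000): one millisecond is 1e6 nanoseconds.
	return time.Duration(ms) * time.Millisecond
}

func Example() {
	fmt.Println(int64(millisToDuration(115000))) // 115000000000
	fmt.Println(millisToDuration(115000))        // 1m55s
}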
"github.com/openshift/insights-operator/pkg/config" - "github.com/openshift/insights-operator/pkg/controller/status" "github.com/openshift/insights-operator/pkg/gather" "github.com/openshift/insights-operator/pkg/gatherers" "github.com/openshift/insights-operator/pkg/recorder" - appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" - kubefake "k8s.io/client-go/kubernetes/fake" ) func Test_Controller_CustomPeriodGatherer(t *testing.T) { @@ -79,7 +70,7 @@ func Test_Controller_Run(t *testing.T) { }, 1*time.Hour) assert.NoError(t, err) stopCh := make(chan struct{}) - go c.Run(stopCh, tt.initialDelay, false) + go c.Run(stopCh, tt.initialDelay) if _, ok := <-time.After(tt.waitTime); ok { stopCh <- struct{}{} } @@ -116,7 +107,7 @@ func Test_Controller_periodicTrigger(t *testing.T) { }, tt.interval) assert.NoError(t, err) stopCh := make(chan struct{}) - go c.periodicTrigger(stopCh, false) + go c.periodicTrigger(stopCh) if _, ok := <-time.After(tt.waitTime); ok { stopCh <- struct{}{} } @@ -207,589 +198,149 @@ func getMocksForPeriodicTest(listGatherers []gatherers.Interface, interval time. Report: true, Interval: interval, }} + mockAPIConfigurator := config.NewMockAPIConfigurator(&v1alpha1.GatherConfig{}) mockRecorder := recorder.MockRecorder{} - mockAnonymizer, err := anonymization.NewAnonymizer("", []string{}, nil, &mockConfigurator, "") + mockAnonymizer, err := anonymization.NewAnonymizer("", []string{}, nil, &mockConfigurator, mockAPIConfigurator) if err != nil { return nil, nil, err } fakeInsightsOperatorCli := fakeOperatorCli.NewSimpleClientset().OperatorV1().InsightsOperators() - mockController := New(&mockConfigurator, &mockRecorder, listGatherers, mockAnonymizer, fakeInsightsOperatorCli, nil) + mockController := New(&mockConfigurator, &mockRecorder, listGatherers, mockAnonymizer, fakeInsightsOperatorCli, mockAPIConfigurator) return mockController, &mockRecorder, nil } -func TestCreateNewDataGatherCR(t *testing.T) { - cs := insightsFakeCli.NewSimpleClientset() - mockController := NewWithTechPreview(nil, nil, nil, nil, nil, cs.InsightsV1alpha1(), nil) +func Test_createGathererStatus(t *testing.T) { //nolint: funlen tests := []struct { - name string - disabledGatherers []string - dataPolicy v1alpha1.DataPolicy - expected *v1alpha1.DataGather + name string + gfr gather.GathererFunctionReport + expectedGs v1.GathererStatus }{ { - name: "Empty DataGather resource creation", - disabledGatherers: []string{}, - dataPolicy: "", - expected: &v1alpha1.DataGather{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "periodic-gathering-", - }, - Spec: v1alpha1.DataGatherSpec{ - DataPolicy: "", - }, - }, - }, - { - name: "DataGather with NoPolicy DataPolicy", - disabledGatherers: []string{}, - dataPolicy: v1alpha1.NoPolicy, - expected: &v1alpha1.DataGather{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "periodic-gathering-", - }, - Spec: v1alpha1.DataGatherSpec{ - DataPolicy: "ClearText", - }, + name: "Data gathered OK", + gfr: gather.GathererFunctionReport{ + FuncName: "gatherer1/foo", + Duration: 115000, + RecordsCount: 5, }, - }, - { - name: "DataGather with ObfuscateNetworking DataPolicy and some disabled gatherers", - disabledGatherers: []string{ - "clusterconfig/foo", - "clusterconfig/bar", - "workloads", - }, - dataPolicy: v1alpha1.ObfuscateNetworking, - expected: &v1alpha1.DataGather{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "periodic-gathering-", + expectedGs: v1.GathererStatus{ + Name: "gatherer1/foo", + LastGatherDuration: metav1.Duration{ + Duration: 
115000000000, }, - Spec: v1alpha1.DataGatherSpec{ - DataPolicy: "ObfuscateNetworking", - Gatherers: []v1alpha1.GathererConfig{ - { - Name: "clusterconfig/foo", - State: v1alpha1.Disabled, - }, - { - Name: "clusterconfig/bar", - State: v1alpha1.Disabled, - }, - { - Name: "workloads", - State: v1alpha1.Disabled, - }, + Conditions: []metav1.Condition{ + { + Type: DataGatheredCondition, + Status: metav1.ConditionTrue, + Reason: GatheredOKReason, + Message: "Created 5 records in the archive.", }, }, }, }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - dg, err := mockController.createNewDataGatherCR(context.Background(), tt.disabledGatherers, tt.dataPolicy) - assert.NoError(t, err) - assert.Equal(t, tt.expected, dg) - err = cs.InsightsV1alpha1().DataGathers().Delete(context.Background(), dg.Name, metav1.DeleteOptions{}) - assert.NoError(t, err) - }) - } -} - -func TestCopyDataGatherStatusToOperatorStatus(t *testing.T) { - tests := []struct { - name string - testedDataGather v1alpha1.DataGather - testedInsightsOperator operatorv1.InsightsOperator - expected *operatorv1.InsightsOperator - }{ { - name: "Basic copy status test", - testedDataGather: v1alpha1.DataGather{ - ObjectMeta: metav1.ObjectMeta{Name: "foo"}, - Status: v1alpha1.DataGatherStatus{ - State: v1alpha1.Failed, - StartTime: metav1.Date(2020, 5, 13, 2, 30, 0, 0, time.UTC), - FinishTime: metav1.Date(2020, 5, 13, 2, 56, 54, 0, time.UTC), - Gatherers: []v1alpha1.GathererStatus{ - { - Name: "clusterconfig/foo1", - Conditions: []metav1.Condition{ - { - Type: status.DataGatheredCondition, - Status: metav1.ConditionTrue, - Reason: status.GatheredOKReason, - LastTransitionTime: metav1.Date(2020, 5, 13, 2, 35, 5, 0, time.UTC), - }, - }, - }, - { - Name: "clusterconfig/bar", - Conditions: []metav1.Condition{ - { - Type: status.DataGatheredCondition, - Status: metav1.ConditionTrue, - Reason: status.GatherErrorReason, - Message: "Gatherer failed", - LastTransitionTime: metav1.Date(2020, 5, 13, 2, 36, 5, 0, time.UTC), - }, - }, - }, - { - Name: "workloads", - Conditions: []metav1.Condition{ - { - Type: status.DataGatheredCondition, - Status: metav1.ConditionTrue, - Reason: status.GatheredOKReason, - LastTransitionTime: metav1.Date(2020, 5, 13, 2, 38, 5, 0, time.UTC), - }, - }, - }, - }, - }, - }, - testedInsightsOperator: operatorv1.InsightsOperator{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster", - }, - Status: operatorv1.InsightsOperatorStatus{ - GatherStatus: operatorv1.GatherStatus{ - LastGatherTime: metav1.Date(2020, 5, 12, 2, 0, 0, 0, time.UTC), - LastGatherDuration: metav1.Duration{Duration: 5 * time.Minute}, - Gatherers: []operatorv1.GathererStatus{ - { - Name: "clusterconfig/foo1", - Conditions: []metav1.Condition{ - { - Type: status.DataGatheredCondition, - Status: metav1.ConditionTrue, - Reason: status.GatheredOKReason, - LastTransitionTime: metav1.Date(2020, 5, 12, 1, 0, 0, 0, time.UTC), - }, - }, - }, - }, - }, - }, + name: "No Data", + gfr: gather.GathererFunctionReport{ + FuncName: "gatherer2/baz", + Duration: 0, + RecordsCount: 0, }, - expected: &operatorv1.InsightsOperator{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster", + expectedGs: v1.GathererStatus{ + Name: "gatherer2/baz", + LastGatherDuration: metav1.Duration{ + Duration: 0, }, - Status: operatorv1.InsightsOperatorStatus{ - GatherStatus: operatorv1.GatherStatus{ - LastGatherTime: metav1.Date(2020, 5, 13, 2, 56, 54, 0, time.UTC), - LastGatherDuration: metav1.Duration{ - Duration: 1614 * time.Second, - }, - Gatherers: 
[]operatorv1.GathererStatus{ - { - Name: "clusterconfig/foo1", - Conditions: []metav1.Condition{ - { - Type: status.DataGatheredCondition, - Status: metav1.ConditionTrue, - Reason: status.GatheredOKReason, - LastTransitionTime: metav1.Date(2020, 5, 13, 2, 35, 5, 0, time.UTC), - }, - }, - }, - { - Name: "clusterconfig/bar", - Conditions: []metav1.Condition{ - { - Type: status.DataGatheredCondition, - Status: metav1.ConditionTrue, - Reason: status.GatherErrorReason, - Message: "Gatherer failed", - LastTransitionTime: metav1.Date(2020, 5, 13, 2, 36, 5, 0, time.UTC), - }, - }, - }, - { - Name: "workloads", - Conditions: []metav1.Condition{ - { - Type: status.DataGatheredCondition, - Status: metav1.ConditionTrue, - Reason: status.GatheredOKReason, - LastTransitionTime: metav1.Date(2020, 5, 13, 2, 38, 5, 0, time.UTC), - }, - }, - }, - }, + Conditions: []metav1.Condition{ + { + Type: DataGatheredCondition, + Status: metav1.ConditionFalse, + Reason: NoDataGatheredReason, }, }, }, }, { - name: "InsightsReport attribute is not updated when copying", - testedDataGather: v1alpha1.DataGather{ - ObjectMeta: metav1.ObjectMeta{Name: "foo"}, - Status: v1alpha1.DataGatherStatus{ - State: v1alpha1.Failed, - StartTime: metav1.Date(2020, 5, 13, 2, 30, 0, 0, time.UTC), - FinishTime: metav1.Date(2020, 5, 13, 2, 56, 54, 0, time.UTC), - Gatherers: []v1alpha1.GathererStatus{ - { - Name: "clusterconfig/foo1", - Conditions: []metav1.Condition{ - { - Type: status.DataGatheredCondition, - Status: metav1.ConditionTrue, - Reason: status.GatheredOKReason, - LastTransitionTime: metav1.Date(2020, 5, 13, 2, 35, 5, 0, time.UTC), - }, - }, - }, - }, - }, - }, - testedInsightsOperator: operatorv1.InsightsOperator{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster", - }, - Status: operatorv1.InsightsOperatorStatus{ - InsightsReport: operatorv1.InsightsReport{ - DownloadedAt: metav1.Date(2020, 5, 13, 2, 40, 0, 0, time.UTC), - HealthChecks: []operatorv1.HealthCheck{ - { - Description: "healtheck ABC", - TotalRisk: 1, - State: operatorv1.HealthCheckEnabled, - AdvisorURI: "test-uri", - }, - { - Description: "healtheck XYZ", - TotalRisk: 2, - State: operatorv1.HealthCheckEnabled, - }, - }, - }, - GatherStatus: operatorv1.GatherStatus{ - LastGatherTime: metav1.Date(2020, 5, 12, 2, 0, 0, 0, time.UTC), - LastGatherDuration: metav1.Duration{Duration: 5 * time.Minute}, - Gatherers: []operatorv1.GathererStatus{ - { - Name: "clusterconfig/foo1", - Conditions: []metav1.Condition{ - { - Type: status.DataGatheredCondition, - Status: metav1.ConditionTrue, - Reason: status.GatheredOKReason, - LastTransitionTime: metav1.Date(2020, 5, 12, 1, 0, 0, 0, time.UTC), - }, - }, - }, - }, - }, - }, - }, - expected: &operatorv1.InsightsOperator{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster", - }, - Status: operatorv1.InsightsOperatorStatus{ - InsightsReport: operatorv1.InsightsReport{ - DownloadedAt: metav1.Date(2020, 5, 13, 2, 40, 0, 0, time.UTC), - HealthChecks: []operatorv1.HealthCheck{ - { - Description: "healtheck ABC", - TotalRisk: 1, - State: operatorv1.HealthCheckEnabled, - AdvisorURI: "test-uri", - }, - { - Description: "healtheck XYZ", - TotalRisk: 2, - State: operatorv1.HealthCheckEnabled, - }, - }, - }, - GatherStatus: operatorv1.GatherStatus{ - LastGatherTime: metav1.Date(2020, 5, 13, 2, 56, 54, 0, time.UTC), - LastGatherDuration: metav1.Duration{ - Duration: 1614 * time.Second, - }, - Gatherers: []operatorv1.GathererStatus{ - { - Name: "clusterconfig/foo1", - Conditions: []metav1.Condition{ - { - Type: status.DataGatheredCondition, - 
Status: metav1.ConditionTrue, - Reason: status.GatheredOKReason, - LastTransitionTime: metav1.Date(2020, 5, 13, 2, 35, 5, 0, time.UTC), - }, - }, - }, - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - dataGatherFakeCS := insightsFakeCli.NewSimpleClientset(&tt.testedDataGather) - operatorFakeCS := fakeOperatorCli.NewSimpleClientset(&tt.testedInsightsOperator) - mockController := NewWithTechPreview(nil, nil, nil, nil, nil, - dataGatherFakeCS.InsightsV1alpha1(), operatorFakeCS.OperatorV1().InsightsOperators()) - updatedOperator, err := mockController.copyDataGatherStatusToOperatorStatus(context.Background(), tt.testedDataGather.Name) - assert.NoError(t, err) - assert.Equal(t, tt.expected, updatedOperator) - }) - } -} - -func TestCreateDataGatherAttributeValues(t *testing.T) { - tests := []struct { - name string - gatherConfig configv1alpha1.GatherConfig - gatheres []gatherers.Interface - expectedPolicy v1alpha1.DataPolicy - expectedDisabledGatherers []string - }{ - { - name: "Two disabled gatherers and ObfuscateNetworking Policy", - gatherConfig: configv1alpha1.GatherConfig{ - DataPolicy: configv1alpha1.ObfuscateNetworking, - DisabledGatherers: []string{ - "mock_gatherer", - "foo_gatherer", - }, - }, - gatheres: []gatherers.Interface{ - &gather.MockGatherer{}, - &gather.MockCustomPeriodGathererNoPeriod{ShouldBeProcessed: true}, - }, - expectedPolicy: v1alpha1.ObfuscateNetworking, - expectedDisabledGatherers: []string{"mock_gatherer", "foo_gatherer"}, - }, - { - name: "Custom period gatherer is excluded because it should not be processed", - gatherConfig: configv1alpha1.GatherConfig{ - DataPolicy: configv1alpha1.NoPolicy, - DisabledGatherers: []string{ - "clusterconfig/bar", - }, - }, - gatheres: []gatherers.Interface{ - &gather.MockGatherer{}, - &gather.MockCustomPeriodGathererNoPeriod{ShouldBeProcessed: false}, - }, - expectedPolicy: v1alpha1.NoPolicy, - expectedDisabledGatherers: []string{"clusterconfig/bar", "mock_custom_period_gatherer_no_period"}, - }, - { - name: "Empty data policy is created as NoPolicy/ClearText", - gatherConfig: configv1alpha1.GatherConfig{ - DataPolicy: "", - DisabledGatherers: []string{}, - }, - gatheres: []gatherers.Interface{ - &gather.MockGatherer{}, - &gather.MockCustomPeriodGathererNoPeriod{ShouldBeProcessed: true}, + name: "Gatherer Error", + gfr: gather.GathererFunctionReport{ + FuncName: "gatherer3/bar", + Duration: 0, + RecordsCount: 0, + Errors: []string{"unable to read the data"}, }, - expectedPolicy: v1alpha1.NoPolicy, - expectedDisabledGatherers: []string{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - mockAPIConfig := config.NewMockAPIConfigurator(&tt.gatherConfig) - mockController := NewWithTechPreview(nil, nil, mockAPIConfig, tt.gatheres, nil, nil, nil) - disabledGatherers, dp := mockController.createDataGatherAttributeValues() - assert.Equal(t, tt.expectedPolicy, dp) - assert.EqualValues(t, disabledGatherers, tt.expectedDisabledGatherers) - }) - } -} - -func TestGetInsightsImage(t *testing.T) { - tests := []struct { - name string - testDeployment appsv1.Deployment - expectedImageName string - expectedError error - }{ - { - name: "Successful image get", - testDeployment: appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "insights-operator", - Namespace: insightsNamespace, + expectedGs: v1.GathererStatus{ + Name: "gatherer3/bar", + LastGatherDuration: metav1.Duration{ + Duration: 0, }, - Spec: appsv1.DeploymentSpec{ - Template: v1.PodTemplateSpec{ - Spec: 
v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "test-image", - Image: "testing-image:123", - }, - }, - }, + Conditions: []metav1.Condition{ + { + Type: DataGatheredCondition, + Status: metav1.ConditionFalse, + Reason: GatherErrorReason, + Message: "unable to read the data", }, }, }, - expectedImageName: "testing-image:123", - expectedError: nil, }, { - name: "Empty deployment spec", - testDeployment: appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "insights-operator", - Namespace: insightsNamespace, - }, - Spec: appsv1.DeploymentSpec{}, + name: "Data gathered with an error", + gfr: gather.GathererFunctionReport{ + FuncName: "gatherer4/quz", + Duration: 9000, + RecordsCount: 2, + Errors: []string{"didn't find xyz configmap"}, }, - expectedImageName: "", - expectedError: fmt.Errorf("no container defined in the deployment"), - }, - { - name: "Multiple containers - first container image is returned", - testDeployment: appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "insights-operator", - Namespace: insightsNamespace, + expectedGs: v1.GathererStatus{ + Name: "gatherer4/quz", + LastGatherDuration: metav1.Duration{ + Duration: 9000000000, }, - Spec: appsv1.DeploymentSpec{ - Template: v1.PodTemplateSpec{ - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "test-image-1", - Image: "testing-image-1:123", - }, - { - Name: "test-image-2", - Image: "testing-image-1:123", - }, - }, - }, + Conditions: []metav1.Condition{ + { + Type: DataGatheredCondition, + Status: metav1.ConditionTrue, + Reason: GatheredWithErrorReason, + Message: "Created 2 records in the archive. Error: didn't find xyz configmap", }, }, }, - expectedImageName: "testing-image-1:123", - expectedError: nil, }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cs := kubefake.NewSimpleClientset(&tt.testDeployment) - mockController := NewWithTechPreview(nil, nil, nil, nil, cs, nil, nil) - imgName, err := mockController.getInsightsImage(context.Background()) - assert.Equal(t, tt.expectedError, err) - assert.Equal(t, tt.expectedImageName, imgName) - }) - } -} - -func TestPeriodicPrune(t *testing.T) { - tests := []struct { - name string - jobs []runtime.Object - dataGathers []runtime.Object - expectedJobs []string - expectedDataGathers []string - }{ { - name: "Basic pruning test", - jobs: []runtime.Object{ - &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: "to-be-removed-job-1", - Namespace: insightsNamespace, - CreationTimestamp: metav1.Time{ - Time: metav1.Now().Time.Add(-25 * time.Hour), - }, - }, - }, - &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: "to-be-removed-job-2", - Namespace: insightsNamespace, - CreationTimestamp: metav1.Time{ - Time: metav1.Now().Time.Add(-1441 * time.Minute), - }, - }, - }, - &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: "to-keep-job-1", - Namespace: insightsNamespace, - CreationTimestamp: metav1.Time{ - Time: metav1.Now().Time.Add(-23 * time.Hour), - }, - }, - }, - &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: "to-keep-job-2", - Namespace: insightsNamespace, - CreationTimestamp: metav1.Time{ - Time: metav1.Now().Time.Add(-2 * time.Hour), - }, - }, - }, + name: "Gatherer panicked", + gfr: gather.GathererFunctionReport{ + FuncName: "gatherer5/quz", + Duration: 0, + RecordsCount: 0, + Panic: "quz gatherer panicked", }, - dataGathers: []runtime.Object{ - &v1alpha1.DataGather{ - ObjectMeta: metav1.ObjectMeta{ - Name: "to-be-removed-dg-1", - CreationTimestamp: metav1.Time{ - Time: metav1.Now().Time.Add(-25 * time.Hour), - 
}, - }, + expectedGs: v1.GathererStatus{ + Name: "gatherer5/quz", + LastGatherDuration: metav1.Duration{ + Duration: 0, }, - &v1alpha1.DataGather{ - ObjectMeta: metav1.ObjectMeta{ - Name: "to-be-removed-dg-2", - CreationTimestamp: metav1.Time{ - Time: metav1.Now().Time.Add(-1441 * time.Minute), - }, - }, - }, - &v1alpha1.DataGather{ - ObjectMeta: metav1.ObjectMeta{ - Name: "to-keep-dg-1", - CreationTimestamp: metav1.Time{ - Time: metav1.Now().Time.Add(-1339 * time.Minute), - }, + Conditions: []metav1.Condition{ + { + Type: DataGatheredCondition, + Status: metav1.ConditionFalse, + Reason: GatherPanicReason, + Message: "quz gatherer panicked", }, }, }, - expectedJobs: []string{"to-keep-job-1", "to-keep-job-2"}, - expectedDataGathers: []string{"to-keep-dg-1"}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - kubeCs := kubefake.NewSimpleClientset(tt.jobs...) - insightsCs := insightsFakeCli.NewSimpleClientset(tt.dataGathers...) - mockController := NewWithTechPreview(nil, nil, nil, nil, kubeCs, insightsCs.InsightsV1alpha1(), nil) - mockController.pruneInterval = 90 * time.Millisecond - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() - mockController.PeriodicPrune(ctx) + gathererStatus := createGathererStatus(&tt.gfr) + assert.Equal(t, tt.expectedGs.Name, gathererStatus.Name) + assert.Equal(t, tt.expectedGs.LastGatherDuration, gathererStatus.LastGatherDuration) - jobList, err := kubeCs.BatchV1().Jobs(insightsNamespace).List(context.Background(), metav1.ListOptions{}) - assert.NoError(t, err) - assert.Len(t, jobList.Items, 2) - for _, j := range jobList.Items { - assert.Contains(t, tt.expectedJobs, j.Name) - } - dataGathersList, err := insightsCs.InsightsV1alpha1().DataGathers().List(context.Background(), metav1.ListOptions{}) - assert.NoError(t, err) - assert.Len(t, dataGathersList.Items, 1) - for _, dg := range dataGathersList.Items { - assert.Contains(t, tt.expectedDataGathers, dg.Name) - } + // more asserts since we can use simple equal because of the last transition time of the condition + assert.Len(t, gathererStatus.Conditions, 1) + assert.Equal(t, tt.expectedGs.Conditions[0].Type, gathererStatus.Conditions[0].Type) + assert.Equal(t, tt.expectedGs.Conditions[0].Reason, gathererStatus.Conditions[0].Reason) + assert.Equal(t, tt.expectedGs.Conditions[0].Status, gathererStatus.Conditions[0].Status) + assert.Equal(t, tt.expectedGs.Conditions[0].Message, gathererStatus.Conditions[0].Message) }) } } diff --git a/pkg/controller/status/gatherer_status.go b/pkg/controller/status/gatherer_status.go deleted file mode 100644 index 6ceb3fcc2..000000000 --- a/pkg/controller/status/gatherer_status.go +++ /dev/null @@ -1,117 +0,0 @@ -package status - -import ( - "fmt" - "strings" - "time" - - "github.com/openshift/api/insights/v1alpha1" - v1 "github.com/openshift/api/operator/v1" - "github.com/openshift/insights-operator/pkg/gather" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - DataGatheredCondition = "DataGathered" - // NoDataGathered is a reason when there is no data gathered - e.g the resource is not in a cluster - NoDataGatheredReason = "NoData" - // Error is a reason when there is some error and no data gathered - GatherErrorReason = "GatherError" - // Panic is a reason when there is some error and no data gathered - GatherPanicReason = "GatherPanic" - // GatheredOK is a reason when data is gathered as expected - GatheredOKReason = "GatheredOK" - // GatheredWithError is a reason when data is gathered partially or 
with another error message - GatheredWithErrorReason = "GatheredWithError" -) - -// CreateOperatorGathererStatus creates GathererStatus attribute for the "insightsoperator.operator.openshift.io" -// custom resource type. -func CreateOperatorGathererStatus(gfr *gather.GathererFunctionReport) v1.GathererStatus { - gs := v1.GathererStatus{ - Name: gfr.FuncName, - LastGatherDuration: metav1.Duration{ - // v.Duration is in milliseconds and we need nanoseconds - Duration: time.Duration(gfr.Duration * 1000000), - }, - } - - gs.Conditions = createGathererConditions(gfr) - return gs -} - -// CreateDataGatherGathererStatus creates GathererStatus attribute for the "datagather.insights.openshift.io" -// custom resource type. -func CreateDataGatherGathererStatus(gfr *gather.GathererFunctionReport) v1alpha1.GathererStatus { - gs := v1alpha1.GathererStatus{ - Name: gfr.FuncName, - LastGatherDuration: metav1.Duration{ - // v.Duration is in milliseconds and we need nanoseconds - Duration: time.Duration(gfr.Duration * 1000000), - }, - } - - gs.Conditions = createGathererConditions(gfr) - return gs -} - -// createGathererConditions creates GathererConditions based on gatherer result passed in as -// GathererFunctionReport. -func createGathererConditions(gfr *gather.GathererFunctionReport) []metav1.Condition { - conditions := []metav1.Condition{} - - con := metav1.Condition{ - Type: DataGatheredCondition, - LastTransitionTime: metav1.Now(), - Status: metav1.ConditionFalse, - Reason: NoDataGatheredReason, - } - - if gfr.Panic != nil { - con.Reason = GatherPanicReason - con.Message = gfr.Panic.(string) - } - - if gfr.RecordsCount > 0 { - con.Status = metav1.ConditionTrue - con.Reason = GatheredOKReason - con.Message = fmt.Sprintf("Created %d records in the archive.", gfr.RecordsCount) - - if len(gfr.Errors) > 0 { - con.Reason = GatheredWithErrorReason - con.Message = fmt.Sprintf("%s Error: %s", con.Message, strings.Join(gfr.Errors, ",")) - } - - conditions = append(conditions, con) - return conditions - } - - if len(gfr.Errors) > 0 { - con.Reason = GatherErrorReason - con.Message = strings.Join(gfr.Errors, ",") - } - - conditions = append(conditions, con) - return conditions -} - -// DataGatherStatusToOperatorGatherStatus copies "DataGatherStatus" from "datagather.openshift.io" and creates -// "GatherStatus" for "insightsoperator.operator.openshift.io" -func DataGatherStatusToOperatorGatherStatus(dgGatherStatus *v1alpha1.DataGatherStatus) v1.GatherStatus { - operatorGatherStatus := v1.GatherStatus{} - operatorGatherStatus.LastGatherTime = dgGatherStatus.FinishTime - operatorGatherStatus.LastGatherDuration = metav1.Duration{ - Duration: dgGatherStatus.FinishTime.Sub(dgGatherStatus.StartTime.Time), - } - - for _, g := range dgGatherStatus.Gatherers { - gs := v1.GathererStatus{ - Name: g.Name, - LastGatherDuration: g.LastGatherDuration, - Conditions: g.Conditions, - } - operatorGatherStatus.Gatherers = append(operatorGatherStatus.Gatherers, gs) - } - - return operatorGatherStatus -} diff --git a/pkg/controller/status/gatherer_status_test.go b/pkg/controller/status/gatherer_status_test.go deleted file mode 100644 index 9593777b5..000000000 --- a/pkg/controller/status/gatherer_status_test.go +++ /dev/null @@ -1,146 +0,0 @@ -package status - -import ( - "testing" - - v1 "github.com/openshift/api/operator/v1" - "github.com/openshift/insights-operator/pkg/gather" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func Test_createGathererStatus(t *testing.T) { //nolint: funlen - 
tests := []struct { - name string - gfr gather.GathererFunctionReport - expectedGs v1.GathererStatus - }{ - { - name: "Data gathered OK", - gfr: gather.GathererFunctionReport{ - FuncName: "gatherer1/foo", - Duration: 115000, - RecordsCount: 5, - }, - expectedGs: v1.GathererStatus{ - Name: "gatherer1/foo", - LastGatherDuration: metav1.Duration{ - Duration: 115000000000, - }, - Conditions: []metav1.Condition{ - { - Type: DataGatheredCondition, - Status: metav1.ConditionTrue, - Reason: GatheredOKReason, - Message: "Created 5 records in the archive.", - }, - }, - }, - }, - { - name: "No Data", - gfr: gather.GathererFunctionReport{ - FuncName: "gatherer2/baz", - Duration: 0, - RecordsCount: 0, - }, - expectedGs: v1.GathererStatus{ - Name: "gatherer2/baz", - LastGatherDuration: metav1.Duration{ - Duration: 0, - }, - Conditions: []metav1.Condition{ - { - Type: DataGatheredCondition, - Status: metav1.ConditionFalse, - Reason: NoDataGatheredReason, - }, - }, - }, - }, - { - name: "Gatherer Error", - gfr: gather.GathererFunctionReport{ - FuncName: "gatherer3/bar", - Duration: 0, - RecordsCount: 0, - Errors: []string{"unable to read the data"}, - }, - expectedGs: v1.GathererStatus{ - Name: "gatherer3/bar", - LastGatherDuration: metav1.Duration{ - Duration: 0, - }, - Conditions: []metav1.Condition{ - { - Type: DataGatheredCondition, - Status: metav1.ConditionFalse, - Reason: GatherErrorReason, - Message: "unable to read the data", - }, - }, - }, - }, - { - name: "Data gathered with an error", - gfr: gather.GathererFunctionReport{ - FuncName: "gatherer4/quz", - Duration: 9000, - RecordsCount: 2, - Errors: []string{"didn't find xyz configmap"}, - }, - expectedGs: v1.GathererStatus{ - Name: "gatherer4/quz", - LastGatherDuration: metav1.Duration{ - Duration: 9000000000, - }, - Conditions: []metav1.Condition{ - { - Type: DataGatheredCondition, - Status: metav1.ConditionTrue, - Reason: GatheredWithErrorReason, - Message: "Created 2 records in the archive. 
Error: didn't find xyz configmap", - }, - }, - }, - }, - { - name: "Gatherer panicked", - gfr: gather.GathererFunctionReport{ - FuncName: "gatherer5/quz", - Duration: 0, - RecordsCount: 0, - Panic: "quz gatherer panicked", - }, - expectedGs: v1.GathererStatus{ - Name: "gatherer5/quz", - LastGatherDuration: metav1.Duration{ - Duration: 0, - }, - Conditions: []metav1.Condition{ - { - Type: DataGatheredCondition, - Status: metav1.ConditionFalse, - Reason: GatherPanicReason, - Message: "quz gatherer panicked", - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gathererStatus := CreateOperatorGathererStatus(&tt.gfr) - assert.Equal(t, tt.expectedGs.Name, gathererStatus.Name) - assert.Equal(t, tt.expectedGs.LastGatherDuration, gathererStatus.LastGatherDuration) - - // more asserts since we can use simple equal because of the last transition time of the condition - assert.Len(t, gathererStatus.Conditions, 1) - assert.Equal(t, tt.expectedGs.Conditions[0].Type, gathererStatus.Conditions[0].Type) - assert.Equal(t, tt.expectedGs.Conditions[0].Reason, gathererStatus.Conditions[0].Reason) - assert.Equal(t, tt.expectedGs.Conditions[0].Status, gathererStatus.Conditions[0].Status) - assert.Equal(t, tt.expectedGs.Conditions[0].Message, gathererStatus.Conditions[0].Message) - }) - } -} diff --git a/pkg/gather/gather.go b/pkg/gather/gather.go index f674740be..cd24c80f7 100644 --- a/pkg/gather/gather.go +++ b/pkg/gather/gather.go @@ -12,7 +12,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/klog/v2" - "github.com/openshift/api/insights/v1alpha1" + "github.com/openshift/api/config/v1alpha1" "github.com/openshift/insights-operator/pkg/anonymization" "github.com/openshift/insights-operator/pkg/config/configobserver" "github.com/openshift/insights-operator/pkg/gatherers" @@ -83,10 +83,10 @@ func CollectAndRecordGatherer( ctx context.Context, gatherer gatherers.Interface, rec recorder.Interface, - gatherConfigs []v1alpha1.GathererConfig, + gatherConfig *v1alpha1.GatherConfig, ) ([]GathererFunctionReport, error) { startTime := time.Now() - reports, totalNumberOfRecords, errs := collectAndRecordGatherer(ctx, gatherer, rec, gatherConfigs) + reports, totalNumberOfRecords, errs := collectAndRecordGatherer(ctx, gatherer, rec, gatherConfig) reports = append(reports, GathererFunctionReport{ FuncName: gatherer.GetName(), Duration: time.Since(startTime).Milliseconds(), @@ -101,9 +101,9 @@ func collectAndRecordGatherer( ctx context.Context, gatherer gatherers.Interface, rec recorder.Interface, - gatherConfigs []v1alpha1.GathererConfig, + gatherConfig *v1alpha1.GatherConfig, ) (reports []GathererFunctionReport, totalNumberOfRecords int, allErrors []error) { - resultsChan, err := startGatheringConcurrently(ctx, gatherer, gatherConfigs) + resultsChan, err := startGatheringConcurrently(ctx, gatherer, gatherConfig) if err != nil { allErrors = append(allErrors, err) return reports, totalNumberOfRecords, allErrors @@ -234,7 +234,7 @@ func RecordArchiveMetadata( // startGatheringConcurrently starts gathering of enabled functions of the provided gatherer and returns a channel // with results which will be closed when processing is done func startGatheringConcurrently( - ctx context.Context, gatherer gatherers.Interface, gatherConfigs []v1alpha1.GathererConfig, + ctx context.Context, gatherer gatherers.Interface, gatheringConfig *v1alpha1.GatherConfig, ) (chan GatheringFunctionResult, error) { var tasks []Task var gatheringFunctions map[string]gatherers.GatheringClosure @@ -244,8 +244,8 @@ func 
startGatheringConcurrently( } // This is from TechPreview feature so we have to check the nil - if len(gatherConfigs) > 0 { - gatheringFunctions = getEnabledGatheringFunctions(gatherer.GetName(), gatheringFunctions, gatherConfigs) + if gatheringConfig != nil { + gatheringFunctions = getEnabledGatheringFunctions(gatherer.GetName(), gatheringFunctions, gatheringConfig.DisabledGatherers) } if len(gatheringFunctions) == 0 { @@ -266,29 +266,20 @@ func startGatheringConcurrently( // creates a new map without all the disabled functions func getEnabledGatheringFunctions(gathererName string, allGatheringFunctions map[string]gatherers.GatheringClosure, - gathererConfigs []v1alpha1.GathererConfig) map[string]gatherers.GatheringClosure { + disabledFunctions []string) map[string]gatherers.GatheringClosure { enabledGatheringFunctions := make(map[string]gatherers.GatheringClosure) // disabling a complete gatherer - e.g workloads - if isGathererDisabled(gathererConfigs, gathererName) { + if utils.StringInSlice(gathererName, disabledFunctions) { klog.Infof("%s gatherer is completely disabled", gathererName) return enabledGatheringFunctions } for fName, gatherinClosure := range allGatheringFunctions { fullGathererName := fmt.Sprintf("%s/%s", gathererName, fName) - if !isGathererDisabled(gathererConfigs, fullGathererName) { + if !utils.StringInSlice(fullGathererName, disabledFunctions) { enabledGatheringFunctions[fName] = gatherinClosure } } return enabledGatheringFunctions } - -func isGathererDisabled(gathererConfigs []v1alpha1.GathererConfig, gathererName string) bool { - for _, gf := range gathererConfigs { - if gf.Name == gathererName && gf.State == v1alpha1.Disabled { - return true - } - } - return false -} diff --git a/pkg/gather/gather_test.go b/pkg/gather/gather_test.go index 79526a3ab..e0e8cd2ff 100644 --- a/pkg/gather/gather_test.go +++ b/pkg/gather/gather_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/openshift/api/insights/v1alpha1" + "github.com/openshift/api/config/v1alpha1" "github.com/openshift/insights-operator/pkg/anonymization" "github.com/openshift/insights-operator/pkg/config" "github.com/openshift/insights-operator/pkg/gatherers" @@ -22,11 +22,11 @@ import ( func Test_getEnabledGatheringFunctions(t *testing.T) { tests := []struct { - testName string - gathererName string - all map[string]gatherers.GatheringClosure - gathererConfigs []v1alpha1.GathererConfig - expected map[string]gatherers.GatheringClosure + testName string + gathererName string + all map[string]gatherers.GatheringClosure + disabled []string + expected map[string]gatherers.GatheringClosure }{ { testName: "disable some functions", @@ -37,14 +37,9 @@ func Test_getEnabledGatheringFunctions(t *testing.T) { "authentication": {}, "some_function": {}, }, - gathererConfigs: []v1alpha1.GathererConfig{ - { - Name: "clusterconfig/container_images", - State: v1alpha1.Disabled, - }, { - Name: "clusterconfig/nodes", - State: v1alpha1.Disabled, - }, + disabled: []string{ + "clusterconfig/container_images", + "clusterconfig/nodes", }, expected: map[string]gatherers.GatheringClosure{ "authentication": {}, @@ -60,15 +55,9 @@ func Test_getEnabledGatheringFunctions(t *testing.T) { "authentication": {}, "some_function": {}, }, - gathererConfigs: []v1alpha1.GathererConfig{ - { - Name: "clusterconfig/foo", - State: v1alpha1.Disabled, - }, - { - Name: "clusterconfig/bar", - State: v1alpha1.Disabled, - }, + disabled: []string{ + "clusterconfig/foo", + "clusterconfig/bar", }, expected: 
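// A compact sketch of the filtering rule implemented by
// getEnabledGatheringFunctions above: an entry in the disabled list can name a
// whole gatherer ("clusterconfig") or a single function
// ("clusterconfig/container_images"). slices.Contains (Go 1.21+) stands in for
// the operator's utils.StringInSlice helper; the function name is illustrative.
package sketch

import (
	"fmt"
	"slices"
)

func enabledFunctions(gathererName string, functions, disabled []string) []string {
	if slices.Contains(disabled, gathererName) {
		// The whole gatherer is disabled.
		return nil
	}
	var enabled []string
	for _, fn := range functions {
		// Individual functions are disabled by their fully qualified name.
		if !slices.Contains(disabled, fmt.Sprintf("%s/%s", gathererName, fn)) {
			enabled = append(enabled, fn)
		}
	}
	return enabled
}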
map[string]gatherers.GatheringClosure{ "container_images": {}, @@ -86,11 +75,8 @@ func Test_getEnabledGatheringFunctions(t *testing.T) { "authentication": {}, "some_function": {}, }, - gathererConfigs: []v1alpha1.GathererConfig{ - { - Name: "clusterconfig", - State: v1alpha1.Disabled, - }, + disabled: []string{ + "clusterconfig", }, expected: map[string]gatherers.GatheringClosure{}, }, @@ -103,7 +89,7 @@ func Test_getEnabledGatheringFunctions(t *testing.T) { "authentication": {}, "some_function": {}, }, - gathererConfigs: []v1alpha1.GathererConfig{}, + disabled: []string{}, expected: map[string]gatherers.GatheringClosure{ "container_images": {}, "nodes": {}, @@ -115,7 +101,7 @@ func Test_getEnabledGatheringFunctions(t *testing.T) { for _, tt := range tests { t.Run(tt.testName, func(t *testing.T) { - result := getEnabledGatheringFunctions(tt.gathererName, tt.all, tt.gathererConfigs) + result := getEnabledGatheringFunctions(tt.gathererName, tt.all, tt.disabled) assert.Equal(t, tt.expected, result) }) } @@ -185,22 +171,12 @@ func Test_StartGatheringConcurrently(t *testing.T) { }, }) - resultsChan, err = startGatheringConcurrently(context.Background(), gatherer, []v1alpha1.GathererConfig{ - { - Name: "mock_gatherer/3_records", - State: v1alpha1.Disabled, - }, - { - Name: "mock_gatherer/errors", - State: v1alpha1.Disabled, - }, - { - Name: "mock_gatherer/panic", - State: v1alpha1.Disabled, - }, - { - Name: "mock_gatherer/name", - State: v1alpha1.Disabled, + resultsChan, err = startGatheringConcurrently(context.Background(), gatherer, &v1alpha1.GatherConfig{ + DisabledGatherers: []string{ + "mock_gatherer/3_records", + "mock_gatherer/errors", + "mock_gatherer/panic", + "mock_gatherer/name", }, }) assert.NoError(t, err) @@ -221,21 +197,13 @@ func Test_StartGatheringConcurrently(t *testing.T) { }, }) - resultsChan, err = startGatheringConcurrently(context.Background(), gatherer, []v1alpha1.GathererConfig{ - { - Name: "mock_gatherer/some_field", - State: v1alpha1.Disabled, - }, - { - Name: "mock_gatherer/errors", - State: v1alpha1.Disabled, + resultsChan, err = startGatheringConcurrently(context.Background(), gatherer, &v1alpha1.GatherConfig{ + DisabledGatherers: []string{ + "mock_gatherer/some_field", + "mock_gatherer/errors", + "mock_gatherer/panic", }, - { - Name: "mock_gatherer/panic", - State: v1alpha1.Disabled, - }, - }, - ) + }) assert.NoError(t, err) results = gatherResultsFromChannel(resultsChan) assert.Len(t, results, 2) @@ -276,35 +244,21 @@ func Test_StartGatheringConcurrently(t *testing.T) { func Test_StartGatheringConcurrently_Error(t *testing.T) { gatherer := &MockGatherer{SomeField: "some_value"} - resultsChan, err := startGatheringConcurrently(context.Background(), gatherer, []v1alpha1.GathererConfig{ - { - Name: "mock_gatherer/some_field", - State: v1alpha1.Disabled, - }, - { - Name: "mock_gatherer/errors", - State: v1alpha1.Disabled, - }, - { - Name: "mock_gatherer/panic", - State: v1alpha1.Disabled, - }, - { - Name: "mock_gatherer/name", - State: v1alpha1.Disabled, - }, - { - Name: "mock_gatherer/3_records", - State: v1alpha1.Disabled, + resultsChan, err := startGatheringConcurrently(context.Background(), gatherer, &v1alpha1.GatherConfig{ + DisabledGatherers: []string{ + "mock_gatherer/some_field", + "mock_gatherer/errors", + "mock_gatherer/panic", + "mock_gatherer/name", + "mock_gatherer/3_records", }, }) assert.EqualError(t, err, "no gather functions are specified to run") assert.Nil(t, resultsChan) - resultsChan, err = startGatheringConcurrently(context.Background(), 
gatherer, []v1alpha1.GathererConfig{ - { - Name: "mock_gatherer", - State: v1alpha1.Disabled, + resultsChan, err = startGatheringConcurrently(context.Background(), gatherer, &v1alpha1.GatherConfig{ + DisabledGatherers: []string{ + "mock_gatherer", }, }) assert.EqualError(t, err, "no gather functions are specified to run") @@ -316,13 +270,14 @@ func Test_CollectAndRecordGatherer(t *testing.T) { SomeField: "some_value", } mockRecorder := &recorder.MockRecorder{} + mockAPIConfigurator := config.NewMockAPIConfigurator(&v1alpha1.GatherConfig{}) mockSecretConfigurator := config.NewMockSecretConfigurator(&config.Controller{ EnableGlobalObfuscation: true, }) - anonymizer, err := anonymization.NewAnonymizer("", nil, nil, mockSecretConfigurator, "") + anonymizer, err := anonymization.NewAnonymizer("", nil, nil, mockSecretConfigurator, mockAPIConfigurator) assert.NoError(t, err) - functionReports, err := CollectAndRecordGatherer(context.Background(), gatherer, mockRecorder, nil) + functionReports, err := CollectAndRecordGatherer(context.Background(), gatherer, mockRecorder, mockAPIConfigurator.GatherConfig()) assert.Error(t, err) err = RecordArchiveMetadata(functionReports, mockRecorder, anonymizer) @@ -393,31 +348,22 @@ func Test_CollectAndRecordGatherer(t *testing.T) { func Test_CollectAndRecordGatherer_Error(t *testing.T) { gatherer := &MockGatherer{} mockRecorder := &recorder.MockRecorder{} - gatherersConfig := []v1alpha1.GathererConfig{ - { - Name: "mock_gatherer/some_field", - State: v1alpha1.Disabled, - }, { - Name: "mock_gatherer/name", - State: v1alpha1.Disabled, - }, - { - Name: "mock_gatherer/panic", - State: v1alpha1.Disabled, + mockAPIConfigurator := config.NewMockAPIConfigurator(&v1alpha1.GatherConfig{ + DisabledGatherers: []string{ + "mock_gatherer/some_field", + "mock_gatherer/name", + "mock_gatherer/panic", + "mock_gatherer/3_records", }, - { - Name: "mock_gatherer/3_records", - State: v1alpha1.Disabled, - }, - } + }) - functionReports, err := CollectAndRecordGatherer(context.Background(), gatherer, mockRecorder, gatherersConfig) + functionReports, err := CollectAndRecordGatherer(context.Background(), gatherer, mockRecorder, mockAPIConfigurator.GatherConfig()) assert.EqualError( t, err, `function "errors" failed with an error`, ) - anonymizer, err := anonymization.NewAnonymizer("", []string{}, nil, config.NewMockSecretConfigurator(nil), "") + anonymizer, err := anonymization.NewAnonymizer("", []string{}, nil, config.NewMockSecretConfigurator(nil), mockAPIConfigurator) assert.NoError(t, err) err = RecordArchiveMetadata(functionReports, mockRecorder, anonymizer) assert.NoError(t, err) @@ -448,25 +394,16 @@ func Test_CollectAndRecordGatherer_Error(t *testing.T) { func Test_CollectAndRecordGatherer_Panic(t *testing.T) { gatherer := &MockGatherer{} mockRecorder := &recorder.MockRecorder{} - gatherersConfig := []v1alpha1.GathererConfig{ - { - Name: "mock_gatherer/some_field", - State: v1alpha1.Disabled, - }, { - Name: "mock_gatherer/name", - State: v1alpha1.Disabled, + mockAPIConfigurator := config.NewMockAPIConfigurator(&v1alpha1.GatherConfig{ + DisabledGatherers: []string{ + "mock_gatherer/some_field", + "mock_gatherer/name", + "mock_gatherer/errors", + "mock_gatherer/3_records", }, - { - Name: "mock_gatherer/errors", - State: v1alpha1.Disabled, - }, - { - Name: "mock_gatherer/3_records", - State: v1alpha1.Disabled, - }, - } + }) - functionReports, err := CollectAndRecordGatherer(context.Background(), gatherer, mockRecorder, gatherersConfig) + functionReports, err := 
CollectAndRecordGatherer(context.Background(), gatherer, mockRecorder, mockAPIConfigurator.GatherConfig()) assert.EqualError(t, err, `function "panic" panicked`) assert.Len(t, functionReports, 2) functionReports[0].Duration = 0 @@ -507,11 +444,12 @@ func Test_CollectAndRecordGatherer_DuplicateRecords(t *testing.T) { }}, }} mockDriver := &MockDriver{} - anonymizer, err := anonymization.NewAnonymizer("", []string{}, nil, config.NewMockSecretConfigurator(nil), "") + mockAPIConfigurator := config.NewMockAPIConfigurator(&v1alpha1.GatherConfig{}) + anonymizer, err := anonymization.NewAnonymizer("", []string{}, nil, config.NewMockSecretConfigurator(nil), mockAPIConfigurator) assert.NoError(t, err) rec := recorder.New(mockDriver, time.Second, anonymizer) - functionReports, err := CollectAndRecordGatherer(context.Background(), gatherer, rec, nil) + functionReports, err := CollectAndRecordGatherer(context.Background(), gatherer, rec, mockAPIConfigurator.GatherConfig()) assert.Error(t, err) assert.NotEmpty(t, functionReports) assert.Len(t, functionReports, 4) @@ -557,8 +495,9 @@ func Test_CollectAndRecordGatherer_Warning(t *testing.T) { }} mockDriver := &MockDriver{} rec := recorder.New(mockDriver, time.Second, nil) + mockAPIConfigurator := config.NewMockAPIConfigurator(nil) - functionReports, err := CollectAndRecordGatherer(context.Background(), gatherer, rec, nil) + functionReports, err := CollectAndRecordGatherer(context.Background(), gatherer, rec, mockAPIConfigurator.GatherConfig()) assert.NoError(t, err) assert.Len(t, functionReports, 2) assert.Equal(t, "mock_gatherer_with_provided_functions/function_1", functionReports[0].FuncName) diff --git a/pkg/insights/insightsclient/requests.go b/pkg/insights/insightsclient/requests.go index 3e615d11a..abbdaae8f 100644 --- a/pkg/insights/insightsclient/requests.go +++ b/pkg/insights/insightsclient/requests.go @@ -15,18 +15,19 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" ) -func (c *Client) SendAndGetID(ctx context.Context, endpoint string, source Source) (string, error) { +// Send uploads archives to Ingress service +func (c *Client) Send(ctx context.Context, endpoint string, source Source) error { cv, err := c.GetClusterVersion() if apierrors.IsNotFound(err) { - return "", ErrWaitingForVersion + return ErrWaitingForVersion } if err != nil { - return "", err + return err } req, err := c.prepareRequest(ctx, http.MethodPost, endpoint, cv) if err != nil { - return "", err + return err } bytesRead := make(chan int64, 1) @@ -44,7 +45,7 @@ func (c *Client) SendAndGetID(ctx context.Context, endpoint string, source Sourc klog.V(4).Infof("Unable to build a request, possible invalid token: %v", err) // if the request is not build, for example because of invalid endpoint,(maybe some problem with DNS), we want to have record about it in metrics as well. 
counterRequestSend.WithLabelValues(c.metricsName, "0").Inc() - return "", fmt.Errorf("unable to build request to connect to Insights server: %v", err) + return fmt.Errorf("unable to build request to connect to Insights server: %v", err) } requestID := resp.Header.Get(insightsReqId) @@ -62,33 +63,27 @@ func (c *Client) SendAndGetID(ctx context.Context, endpoint string, source Sourc if resp.StatusCode == http.StatusUnauthorized { klog.V(2).Infof("gateway server %s returned 401, %s=%s", resp.Request.URL, insightsReqId, requestID) - return "", authorizer.Error{Err: fmt.Errorf("your Red Hat account is not enabled for remote support or your token has expired: %s", responseBody(resp))} + return authorizer.Error{Err: fmt.Errorf("your Red Hat account is not enabled for remote support or your token has expired: %s", responseBody(resp))} } if resp.StatusCode == http.StatusForbidden { klog.V(2).Infof("gateway server %s returned 403, %s=%s", resp.Request.URL, insightsReqId, requestID) - return "", authorizer.Error{Err: fmt.Errorf("your Red Hat account is not enabled for remote support")} + return authorizer.Error{Err: fmt.Errorf("your Red Hat account is not enabled for remote support")} } if resp.StatusCode == http.StatusBadRequest { - return "", fmt.Errorf("gateway server bad request: %s (request=%s): %s", resp.Request.URL, requestID, responseBody(resp)) + return fmt.Errorf("gateway server bad request: %s (request=%s): %s", resp.Request.URL, requestID, responseBody(resp)) } if resp.StatusCode >= 300 || resp.StatusCode < 200 { - return "", fmt.Errorf("gateway server reported unexpected error code: %d (request=%s): %s", resp.StatusCode, requestID, responseBody(resp)) + return fmt.Errorf("gateway server reported unexpected error code: %d (request=%s): %s", resp.StatusCode, requestID, responseBody(resp)) } if len(requestID) > 0 { klog.V(2).Infof("Successfully reported id=%s %s=%s, wrote=%d", source.ID, insightsReqId, requestID, <-bytesRead) } - return requestID, nil -} - -// Send uploads archives to Ingress service -func (c *Client) Send(ctx context.Context, endpoint string, source Source) error { - _, err := c.SendAndGetID(ctx, endpoint, source) - return err + return nil } // RecvReport performs a request to Insights Results Smart Proxy endpoint diff --git a/pkg/insights/insightsreport/insightsreport.go b/pkg/insights/insightsreport/insightsreport.go index e5d7ca166..a0ceb619a 100644 --- a/pkg/insights/insightsreport/insightsreport.go +++ b/pkg/insights/insightsreport/insightsreport.go @@ -77,16 +77,6 @@ func New(client *insightsclient.Client, configurator configobserver.Configurator } } -func NewWithTechPreview(client *insightsclient.Client, configurator configobserver.Configurator, insightsOperatorCLI operatorv1client.InsightsOperatorInterface) *Controller { - return &Controller{ - StatusController: controllerstatus.New("insightsreport"), - configurator: configurator, - client: client, - archiveUploadReporter: nil, - insightsOperatorCLI: insightsOperatorCLI, - } -} - // PullSmartProxy performs a request to the Smart Proxy and unmarshal the response func (c *Controller) PullSmartProxy() (bool, error) { klog.Info("Pulling report from smart-proxy") diff --git a/pkg/insights/insightsuploader/insightsuploader.go b/pkg/insights/insightsuploader/insightsuploader.go index a7d166149..6a372e2b3 100644 --- a/pkg/insights/insightsuploader/insightsuploader.go +++ b/pkg/insights/insightsuploader/insightsuploader.go @@ -40,7 +40,6 @@ type Controller struct { reporter StatusReporter archiveUploaded chan struct{} 
initialDelay time.Duration - backoff wait.Backoff } func New(summarizer Summarizer, @@ -50,7 +49,7 @@ func New(summarizer Summarizer, statusReporter StatusReporter, initialDelay time.Duration) *Controller { - ctrl := &Controller{ + return &Controller{ StatusController: controllerstatus.New("insightsuploader"), summarizer: summarizer, secretConfigurator: secretconfigurator, @@ -60,12 +59,6 @@ func New(summarizer Summarizer, archiveUploaded: make(chan struct{}), initialDelay: initialDelay, } - ctrl.backoff = wait.Backoff{ - Duration: ctrl.secretConfigurator.Config().Interval / 4, // 30 min as first wait by default - Steps: 4, - Factor: 2, - } - return ctrl } func (c *Controller) Run(ctx context.Context) { @@ -182,31 +175,6 @@ func (c *Controller) ArchiveUploaded() <-chan struct{} { return c.archiveUploaded } -// Upload is an alternative simple upload method used only in TechPreview clusters. -// Returns Insights request ID and error=nil in case of successful data upload. -func (c *Controller) Upload(ctx context.Context, s *insightsclient.Source) (string, error) { - defer s.Contents.Close() - start := time.Now() - s.ID = start.Format(time.RFC3339) - s.Type = "application/vnd.redhat.openshift.periodic" - var requestID string - err := wait.ExponentialBackoff(c.backoff, func() (done bool, err error) { - requestID, err = c.client.SendAndGetID(ctx, c.secretConfigurator.Config().Endpoint, *s) - if err != nil { - klog.V(2).Infof("Unable to upload report after %s: %v", time.Since(start).Truncate(time.Second/100), err) - klog.Errorf("%v. Trying again in %s %d", err, c.backoff.Step(), c.backoff.Steps) - return false, nil - // TODO we would need to propagate the error as HTTP - } - return true, err - }) - if err != nil { - return "", err - } - klog.Infof("Uploaded report successfully in %s", time.Since(start)) - return requestID, nil -} - func reportToLogs(source io.Reader, klog klog.Verbose) error { if !klog.Enabled() { return nil diff --git a/pkg/recorder/diskrecorder/diskrecorder.go b/pkg/recorder/diskrecorder/diskrecorder.go index 153bf8ff3..6109aa0aa 100644 --- a/pkg/recorder/diskrecorder/diskrecorder.go +++ b/pkg/recorder/diskrecorder/diskrecorder.go @@ -178,36 +178,3 @@ func (d *DiskRecorder) Summary(_ context.Context, since time.Time) (*insightscli func isNotArchiveFile(file os.FileInfo) bool { return file.IsDir() || !strings.HasPrefix(file.Name(), "insights-") || !strings.HasSuffix(file.Name(), ".tar.gz") } - -// LastArchive tries to find the latest Insights archive. Returns an error -// when it can't read the base directory or when it can't open the last archive found. 
-func (d *DiskRecorder) LastArchive() (*insightsclient.Source, error) { - files, err := os.ReadDir(d.basePath) - if err != nil { - return nil, err - } - if len(files) == 0 { - return nil, nil - } - var lastTime time.Time - var lastArchive string - for _, file := range files { - fileInfo, err := file.Info() // nolint: govet - if err != nil { - return nil, err - } - if isNotArchiveFile(fileInfo) { - continue - } - if fileInfo.ModTime().After(lastTime) { - lastTime = fileInfo.ModTime() - lastArchive = file.Name() - } - } - f, err := os.Open(filepath.Join(d.basePath, lastArchive)) - if err != nil { - return nil, err - } - - return &insightsclient.Source{Contents: f, CreationTime: d.lastRecording}, nil -} diff --git a/pkg/recorder/diskrecorder/diskrecorder_test.go b/pkg/recorder/diskrecorder/diskrecorder_test.go index a61ecc2e0..4c11dd30b 100644 --- a/pkg/recorder/diskrecorder/diskrecorder_test.go +++ b/pkg/recorder/diskrecorder/diskrecorder_test.go @@ -3,7 +3,6 @@ package diskrecorder import ( "context" "fmt" - "os" "testing" "time" @@ -23,23 +22,17 @@ func getMemoryRecords() record.MemoryRecords { return records } -func newDiskRecorder() (DiskRecorder, error) { - basePath := "/tmp" - path, err := os.MkdirTemp(basePath, "insights-operator") - return DiskRecorder{basePath: path}, err +func newDiskRecorder() DiskRecorder { + return DiskRecorder{basePath: "/tmp"} } func Test_Diskrecorder_Save(t *testing.T) { - dr, err := newDiskRecorder() - assert.NoError(t, err) + dr := newDiskRecorder() records := getMemoryRecords() saved, err := dr.Save(records) assert.NoError(t, err) assert.Len(t, saved, len(records)) assert.WithinDuration(t, time.Now(), dr.lastRecording, 10*time.Second) - - err = removePath(dr) - assert.NoError(t, err) } func Test_Diskrecorder_SaveInvalidPath(t *testing.T) { @@ -48,14 +41,10 @@ func Test_Diskrecorder_SaveInvalidPath(t *testing.T) { saved, err := dr.Save(records) assert.Error(t, err) assert.Nil(t, saved) - - err = removePath(dr) - assert.NoError(t, err) } func Test_Diskrecorder_SaveFailsIfDuplicatedReport(t *testing.T) { - dr, err := newDiskRecorder() - assert.NoError(t, err) + dr := newDiskRecorder() records := record.MemoryRecords{ record.MemoryRecord{ Name: "config/mock1", @@ -70,41 +59,20 @@ func Test_Diskrecorder_SaveFailsIfDuplicatedReport(t *testing.T) { saved, err := dr.Save(records) assert.Error(t, err) assert.Nil(t, saved) - - err = removePath(dr) - assert.NoError(t, err) } func Test_Diskrecorder_Summary(t *testing.T) { - since := time.Now().Add(time.Duration(-2) * time.Second) - dr, err := newDiskRecorder() - assert.NoError(t, err) - - records := getMemoryRecords() - // we need some archives in the filesystem for the Summmary method - _, err = dr.Save(records) - assert.NoError(t, err) - - source, ok, err := dr.Summary(context.Background(), since) + since := time.Now().Add(time.Duration(-5) * time.Minute) + dr := newDiskRecorder() + source, ok, err := dr.Summary(context.TODO(), since) assert.NoError(t, err) assert.True(t, ok) assert.NotNil(t, source) - - err = removePath(dr) - assert.NoError(t, err) } func Test_Diskrecorder_Prune(t *testing.T) { olderThan := time.Now().Add(time.Duration(5) * time.Minute) - dr, err := newDiskRecorder() + dr := newDiskRecorder() + err := dr.Prune(olderThan) assert.NoError(t, err) - err = dr.Prune(olderThan) - assert.NoError(t, err) - - err = removePath(dr) - assert.NoError(t, err) -} - -func removePath(d DiskRecorder) error { - return os.RemoveAll(d.basePath) } diff --git a/pkg/recorder/recorder_test.go b/pkg/recorder/recorder_test.go 
index 925c7ceb6..1b3f1feae 100644 --- a/pkg/recorder/recorder_test.go +++ b/pkg/recorder/recorder_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - "github.com/openshift/api/insights/v1alpha1" + "github.com/openshift/api/config/v1alpha1" "github.com/openshift/insights-operator/pkg/anonymization" "github.com/openshift/insights-operator/pkg/config" "github.com/openshift/insights-operator/pkg/record" @@ -59,17 +59,15 @@ func (d *driverMock) Prune(time.Time) error { return args.Error(1) } -func newRecorder(maxArchiveSize int64, clusterBaseDomain string) (*Recorder, error) { +func newRecorder(maxArchiveSize int64, clusterBaseDomain string) Recorder { driver := driverMock{} driver.On("Save").Return(nil, nil) mockSecretConfigurator := config.NewMockSecretConfigurator(&config.Controller{EnableGlobalObfuscation: true}) - anonymizer, err := anonymization.NewAnonymizer(clusterBaseDomain, nil, nil, mockSecretConfigurator, v1alpha1.ObfuscateNetworking) - if err != nil { - return nil, err - } + mockAPIConfigurator := config.NewMockAPIConfigurator(&v1alpha1.GatherConfig{DataPolicy: v1alpha1.ObfuscateNetworking}) + anonymizer, _ := anonymization.NewAnonymizer(clusterBaseDomain, nil, nil, mockSecretConfigurator, mockAPIConfigurator) interval, _ := time.ParseDuration("1m") - return &Recorder{ + return Recorder{ driver: &driver, interval: interval, maxAge: interval * 6 * 24, @@ -77,12 +75,11 @@ func newRecorder(maxArchiveSize int64, clusterBaseDomain string) (*Recorder, err records: make(map[string]*record.MemoryRecord), recordedFingerprints: make(map[string]string), anonymizer: anonymizer, - }, nil + } } func Test_Record(t *testing.T) { - rec, err := newRecorder(MaxArchiveSize, "") - assert.NoError(t, err) + rec := newRecorder(MaxArchiveSize, "") errs := rec.Record(record.Record{ Name: mock1Name, Item: RawReport{Data: "mock1"}, @@ -92,8 +89,7 @@ func Test_Record(t *testing.T) { } func Test_Record_Duplicated(t *testing.T) { - rec, err := newRecorder(MaxArchiveSize, "") - assert.NoError(t, err) + rec := newRecorder(MaxArchiveSize, "") errs := rec.Record(record.Record{ Name: mock1Name, Item: RawReport{Data: "mock1"}, @@ -108,8 +104,7 @@ func Test_Record_Duplicated(t *testing.T) { } func Test_Record_CantBeSerialized(t *testing.T) { - rec, err := newRecorder(MaxArchiveSize, "") - assert.NoError(t, err) + rec := newRecorder(MaxArchiveSize, "") errs := rec.Record(record.Record{ Name: mock1Name, Item: RawInvalidReport{}, @@ -119,8 +114,7 @@ func Test_Record_CantBeSerialized(t *testing.T) { } func Test_Record_Flush(t *testing.T) { - rec, err := newRecorder(MaxArchiveSize, "") - assert.NoError(t, err) + rec := newRecorder(MaxArchiveSize, "") for i := 0; i < 3; i++ { errs := rec.Record(record.Record{ Name: fmt.Sprintf("config/mock%d", i), @@ -132,23 +126,21 @@ func Test_Record_Flush(t *testing.T) { assert.Empty(t, errs) } } - err = rec.Flush() - assert.NoError(t, err) + err := rec.Flush() + assert.Nil(t, err) assert.Equal(t, int64(0), rec.size) } func Test_Record_FlushEmptyRecorder(t *testing.T) { - rec, err := newRecorder(MaxArchiveSize, "") - assert.NoError(t, err) - err = rec.Flush() - assert.NoError(t, err) + rec := newRecorder(MaxArchiveSize, "") + err := rec.Flush() + assert.Nil(t, err) } func Test_Record_ArchiveSizeExceeded(t *testing.T) { data := "data bigger than 4 bytes" maxArchiveSize := int64(4) - rec, err := newRecorder(maxArchiveSize, "") - assert.NoError(t, err) + rec := newRecorder(maxArchiveSize, "") errs := rec.Record(record.Record{ Name: 
mock1Name, Item: RawReport{ @@ -156,7 +148,7 @@ func Test_Record_ArchiveSizeExceeded(t *testing.T) { }, }) assert.Len(t, errs, 1) - err = errs[0] + err := errs[0] assert.Equal( t, err, @@ -177,8 +169,7 @@ func Test_Record_SizeDoesntGrowWithSameRecords(t *testing.T) { Data: data, }, } - rec, err := newRecorder(MaxArchiveSize, "") - assert.NoError(t, err) + rec := newRecorder(MaxArchiveSize, "") errs := rec.Record(testRec) assert.Empty(t, errs) // record again the same record @@ -187,7 +178,7 @@ func Test_Record_SizeDoesntGrowWithSameRecords(t *testing.T) { // check that size refers only to one record data assert.Equal(t, rec.size, int64(len(data))) - err = rec.Flush() + err := rec.Flush() assert.Nil(t, err) assert.Equal(t, rec.size, int64(0)) } @@ -196,8 +187,7 @@ func Test_ObfuscatedRecord_NameCorrect(t *testing.T) { clusterBaseDomain := "test" testRecordName := fmt.Sprintf("%s/%s-node-1", mock1Name, clusterBaseDomain) obfuscatedRecordName := fmt.Sprintf("%s/%s-node-1", mock1Name, anonymization.ClusterBaseDomainPlaceholder) - rec, err := newRecorder(MaxArchiveSize, clusterBaseDomain) - assert.NoError(t, err) + rec := newRecorder(MaxArchiveSize, clusterBaseDomain) errs := rec.Record(record.Record{ Name: testRecordName, Item: RawReport{ @@ -207,19 +197,19 @@ func Test_ObfuscatedRecord_NameCorrect(t *testing.T) { assert.Empty(t, errs) _, exists := rec.records[obfuscatedRecordName] assert.True(t, exists, "can't find %s record name", testRecordName) - err = rec.Flush() + err := rec.Flush() assert.Nil(t, err) assert.Equal(t, rec.size, int64(0)) } func Test_EmptyItemRecord(t *testing.T) { - rec, err := newRecorder(MaxArchiveSize, "") - assert.NoError(t, err) + rec := newRecorder(MaxArchiveSize, "") + testRec := record.Record{ Name: "test/empty", } errs := rec.Record(testRec) assert.Len(t, errs, 1) - err = errs[0] + err := errs[0] assert.Equal(t, fmt.Errorf(`empty "%s" record data. Nothing will be recorded`, testRec.Name), err) } diff --git a/vendor/github.com/openshift/api/insights/v1alpha1/0000_10_01_datagather.crd.yaml b/vendor/github.com/openshift/api/insights/v1alpha1/0000_10_01_datagather.crd.yaml deleted file mode 100644 index 8bab51591..000000000 --- a/vendor/github.com/openshift/api/insights/v1alpha1/0000_10_01_datagather.crd.yaml +++ /dev/null @@ -1,323 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1365 - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: datagathers.insights.openshift.io -spec: - group: insights.openshift.io - names: - kind: DataGather - listKind: DataGatherList - plural: datagathers - singular: datagather - scope: Cluster - versions: - - additionalPrinterColumns: - - description: DataGather job state - jsonPath: .status.state - name: State - type: string - - description: DataGather start time - jsonPath: .status.startTime - name: StartTime - type: date - - description: DataGather finish time - jsonPath: .status.finishTime - name: FinishTime - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: "DataGather provides data gather configuration options and status for the particular Insights data gathering. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support." - type: object - required: - - spec - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - type: object - properties: - dataPolicy: - description: dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain in the Insights archive data. Valid values are "ClearText" and "ObfuscateNetworking". When set to ClearText the data is not obfuscated. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is ClearText. - type: string - enum: - - "" - - ClearText - - ObfuscateNetworking - gatherers: - description: 'gatherers is a list of gatherers configurations. The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: "oc get insightsoperators.operator.openshift.io cluster -o json | jq ''.status.gatherStatus.gatherers[].name''"' - type: array - items: - description: gathererConfig allows to configure specific gatherers - type: object - required: - - name - properties: - name: - description: name is the name of specific gatherer - type: string - state: - description: state allows you to configure specific gatherer. Valid values are "Enabled", "Disabled" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default. The current default is Enabled. - type: string - enum: - - "" - - Enabled - - Disabled - status: - description: status holds observed values from the cluster. They may not be overridden. - type: object - properties: - conditions: - description: conditions provide details on the status of the gatherer job. - type: array - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - type: object - required: - - lastTransitionTime - - message - - reason - - status - - type - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. 
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - type: string - format: date-time - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - type: string - maxLength: 32768 - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - type: integer - format: int64 - minimum: 0 - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - type: string - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - status: - description: status of the condition, one of True, False, Unknown. - type: string - enum: - - "True" - - "False" - - Unknown - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - type: string - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - dataGatherState: - description: dataGatherState reflects the current state of the data gathering process. - type: string - enum: - - Running - - Completed - - Failed - - Pending - x-kubernetes-validations: - - rule: '!(oldSelf == ''Running'' && self == ''Pending'')' - message: dataGatherState cannot transition from Running to Pending - - rule: '!(oldSelf == ''Completed'' && self == ''Pending'')' - message: dataGatherState cannot transition from Completed to Pending - - rule: '!(oldSelf == ''Failed'' && self == ''Pending'')' - message: dataGatherState cannot transition from Failed to Pending - - rule: '!(oldSelf == ''Completed'' && self == ''Running'')' - message: dataGatherState cannot transition from Completed to Running - - rule: '!(oldSelf == ''Failed'' && self == ''Running'')' - message: dataGatherState cannot transition from Failed to Running - finishTime: - description: finishTime is the time when Insights data gathering finished. - type: string - format: date-time - x-kubernetes-validations: - - rule: self == oldSelf - message: finishTime is immutable once set - gatherers: - description: gatherers is a list of active gatherers (and their statuses) in the last gathering. - type: array - items: - description: gathererStatus represents information about a particular data gatherer. - type: object - required: - - conditions - - lastGatherDuration - - name - properties: - conditions: - description: conditions provide details on the status of each gatherer. - type: array - minItems: 1 - items: - description: "Condition contains details for one aspect of the current state of this API Resource. 
--- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - type: object - required: - - lastTransitionTime - - message - - reason - - status - - type - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - type: string - format: date-time - message: - description: message is a human readable message indicating details about the transition. This may be an empty string. - type: string - maxLength: 32768 - observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. - type: integer - format: int64 - minimum: 0 - reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. - type: string - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - status: - description: status of the condition, one of True, False, Unknown. - type: string - enum: - - "True" - - "False" - - Unknown - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - type: string - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - lastGatherDuration: - description: lastGatherDuration represents the time spent gathering. - type: string - pattern: ^([1-9][0-9]*(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ - name: - description: name is the name of the gatherer. - type: string - maxLength: 256 - minLength: 5 - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - insightsReport: - description: insightsReport provides general Insights analysis results. When omitted, this means no data gathering has taken place yet or the corresponding Insights analysis (identified by "insightsRequestID") is not available. - type: object - properties: - downloadedAt: - description: downloadedAt is the time when the last Insights report was downloaded. An empty value means that there has not been any Insights report downloaded yet and it usually appears in disconnected clusters (or clusters when the Insights data gathering is disabled). 
- type: string - format: date-time - healthChecks: - description: healthChecks provides basic information about active Insights health checks in a cluster. - type: array - items: - description: healthCheck represents an Insights health check attributes. - type: object - required: - - advisorURI - - description - - state - - totalRisk - properties: - advisorURI: - description: advisorURI provides the URL link to the Insights Advisor. - type: string - pattern: ^https:\/\/\S+ - description: - description: description provides basic description of the healtcheck. - type: string - maxLength: 2048 - minLength: 10 - state: - description: state determines what the current state of the health check is. Health check is enabled by default and can be disabled by the user in the Insights advisor user interface. - type: string - enum: - - Enabled - - Disabled - totalRisk: - description: totalRisk of the healthcheck. Indicator of the total risk posed by the detected issue; combination of impact and likelihood. The values can be from 1 to 4, and the higher the number, the more important the issue. - type: integer - format: int32 - maximum: 4 - minimum: 1 - x-kubernetes-list-type: atomic - uri: - description: uri provides the URL link from which the report was downloaded. - type: string - pattern: ^https:\/\/\S+ - insightsRequestID: - description: insightsRequestID is an Insights request ID to track the status of the Insights analysis (in console.redhat.com processing pipeline) for the corresponding Insights data archive. - type: string - x-kubernetes-validations: - - rule: self == oldSelf - message: insightsRequestID is immutable once set - relatedObjects: - description: relatedObjects is a list of resources which are useful when debugging or inspecting the data gathering Pod - type: array - items: - description: ObjectReference contains enough information to let you inspect or modify the referred object. - type: object - required: - - group - - name - - resource - properties: - group: - description: 'group is the API Group of the Resource. Enter empty string for the core group. This value should consist of only lowercase alphanumeric characters, hyphens and periods. Example: "", "apps", "build.openshift.io", etc.' - type: string - pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - name: - description: name of the referent. - type: string - namespace: - description: namespace of the referent. - type: string - resource: - description: 'resource is the type that is being referenced. It is normally the plural form of the resource kind in lowercase. This value should consist of only lowercase alphanumeric characters and hyphens. Example: "deployments", "deploymentconfigs", "pods", etc.' - type: string - pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - startTime: - description: startTime is the time when Insights data gathering started. 
- type: string - format: date-time - x-kubernetes-validations: - - rule: self == oldSelf - message: startTime is immutable once set - x-kubernetes-validations: - - rule: (!has(oldSelf.insightsRequestID) || has(self.insightsRequestID)) - message: cannot remove insightsRequestID attribute from status - - rule: (!has(oldSelf.startTime) || has(self.startTime)) - message: cannot remove startTime attribute from status - - rule: (!has(oldSelf.finishTime) || has(self.finishTime)) - message: cannot remove finishTime attribute from status - - rule: (!has(oldSelf.dataGatherState) || has(self.dataGatherState)) - message: cannot remove dataGatherState attribute from status - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/insights/v1alpha1/Makefile b/vendor/github.com/openshift/api/insights/v1alpha1/Makefile deleted file mode 100644 index c58cc64ac..000000000 --- a/vendor/github.com/openshift/api/insights/v1alpha1/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -.PHONY: test -test: - make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="insights.openshift.io/v1alpha1" \ No newline at end of file diff --git a/vendor/github.com/openshift/api/insights/v1alpha1/doc.go b/vendor/github.com/openshift/api/insights/v1alpha1/doc.go deleted file mode 100644 index d0831e48f..000000000 --- a/vendor/github.com/openshift/api/insights/v1alpha1/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// +k8s:deepcopy-gen=package,register -// +k8s:defaulter-gen=TypeMeta -// +k8s:openapi-gen=true - -// +kubebuilder:validation:Optional -// +groupName=insights.openshift.io -// Package v1alpha1 is the v1alpha1 version of the API. -package v1alpha1 diff --git a/vendor/github.com/openshift/api/insights/v1alpha1/register.go b/vendor/github.com/openshift/api/insights/v1alpha1/register.go deleted file mode 100644 index 288412c63..000000000 --- a/vendor/github.com/openshift/api/insights/v1alpha1/register.go +++ /dev/null @@ -1,38 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - GroupName = "insights.openshift.io" - GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} - schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // Install is a function which adds this version to a scheme - Install = schemeBuilder.AddToScheme - - // SchemeGroupVersion generated code relies on this name - // Deprecated - SchemeGroupVersion = GroupVersion - // AddToScheme exists solely to keep the old generators creating valid code - // DEPRECATED - AddToScheme = schemeBuilder.AddToScheme -) - -// Resource generated code relies on this being here, but it logically belongs to the group -// DEPRECATED -func Resource(resource string) schema.GroupResource { - return schema.GroupResource{Group: GroupName, Resource: resource} -} - -// Adds the list of known types to api.Scheme. 
-func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(GroupVersion, - &DataGather{}, - &DataGatherList{}, - ) - metav1.AddToGroupVersion(scheme, GroupVersion) - return nil -} diff --git a/vendor/github.com/openshift/api/insights/v1alpha1/techpreview.datagather.testsuite.yaml b/vendor/github.com/openshift/api/insights/v1alpha1/techpreview.datagather.testsuite.yaml deleted file mode 100644 index 188480705..000000000 --- a/vendor/github.com/openshift/api/insights/v1alpha1/techpreview.datagather.testsuite.yaml +++ /dev/null @@ -1,258 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this -name: "[TechPreview] DataGather" -crd: 0000_10_01_datagather.crd.yaml -tests: - onCreate: - - name: Should be able to create a minimal DataGather - initial: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - expected: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} - onUpdate: - - name: status is present and startTime is added - initial: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - insightsRequestID: xyz - updated: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - insightsRequestID: xyz - startTime: 2023-03-13T11:34:06Z - expected: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - insightsRequestID: xyz - startTime: 2023-03-13T11:34:06Z - - name: startTime cannot be removed from status - initial: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - insightsRequestID: xyz - startTime: 2023-03-13T11:34:06Z - updated: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - insightsRequestID: xyz - expectedStatusError: "status: Invalid value: \"object\": cannot remove startTime attribute from status" - - name: startTime is immutable once set - initial: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - insightsRequestID: xyz - startTime: 2023-03-13T11:34:06Z - updated: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - insightsRequestID: xyz - startTime: 2023-03-13T12:34:06Z - expectedStatusError: "Invalid value: \"string\": startTime is immutable once set" - - name: Status is presentt and insightsRequestID is added - initial: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - relatedObjects: - - name: periodic-job-xyz - resource: "pods" - group: "" - namespace: "openshift-insights" - updated: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - insightsRequestID: testing-xyz - relatedObjects: - - name: periodic-job-xyz - resource: "pods" - group: "" - namespace: "openshift-insights" - expected: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - insightsRequestID: testing-xyz - relatedObjects: - - name: periodic-job-xyz - group: "" - resource: "pods" - namespace: "openshift-insights" - - name: 
insightsRequestID cannot be removed from status - initial: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - insightsRequestID: testing-xyz - relatedObjects: - - name: periodic-job-xyz - group: "" - resource: "pods" - namespace: "openshift-insights" - updated: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - relatedObjects: - - name: periodic-job-xyz - group: "" - resource: "pods" - namespace: "openshift-insights" - expectedStatusError: "status: Invalid value: \"object\": cannot remove insightsRequestID attribute from status" - - name: insightsRequestID is immutable once set - initial: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - insightsRequestID: testing-xyz - relatedObjects: - - name: periodic-job-xyz - group: "" - resource: "pods" - namespace: "openshift-insights" - updated: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - insightsRequestID: testing-xyz-updated - relatedObjects: - - name: periodic-job-xyz - group: "" - resource: "pods" - namespace: "openshift-insights" - expectedStatusError: "Invalid value: \"string\": insightsRequestID is immutable once set" - - name: finishTime cannot be removed from status - initial: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - finishTime: 2023-03-13T11:34:06Z - relatedObjects: - - name: periodic-job-xyz - group: "" - resource: "pods" - namespace: "openshift-insights" - updated: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - relatedObjects: - - name: periodic-job-xyz - group: "" - resource: "pods" - namespace: "openshift-insights" - expectedStatusError: "status: Invalid value: \"object\": cannot remove finishTime attribute from status" - - name: dataGatherState cannot be removed from status - initial: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - dataGatherState: Running - updated: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - relatedObjects: - - name: periodic-job-xyz - group: "" - resource: "pods" - namespace: "openshift-insights" - expectedStatusError: "status: Invalid value: \"object\": cannot remove dataGatherState attribute from status" - - name: dataGatherState cannot transition from Running to Pending - initial: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - dataGatherState: Running - updated: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - dataGatherState: Pending - expectedStatusError: "status.dataGatherState: Invalid value: \"string\": dataGatherState cannot transition from Running to Pending" - - name: dataGatherState cannot transition from Completed to Pending - initial: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - dataGatherState: Completed - updated: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a 
DataGather - status: - dataGatherState: Pending - expectedStatusError: "status.dataGatherState: Invalid value: \"string\": dataGatherState cannot transition from Completed to Pending" - - name: dataGatherState cannot transition from Failed to Pending - initial: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - dataGatherState: Failed - updated: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - dataGatherState: Pending - expectedStatusError: "status.dataGatherState: Invalid value: \"string\": dataGatherState cannot transition from Failed to Pending" - - name: dataGatherState cannot transition from Completed to Running - initial: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - dataGatherState: Completed - updated: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - dataGatherState: Running - expectedStatusError: "status.dataGatherState: Invalid value: \"string\": dataGatherState cannot transition from Completed to Running" - - name: dataGatherState cannot transition from Failed to Running - initial: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - dataGatherState: Failed - updated: | - apiVersion: insights.openshift.io/v1alpha1 - kind: DataGather - spec: {} # No spec is required for a DataGather - status: - dataGatherState: Running - expectedStatusError: "status.dataGatherState: Invalid value: \"string\": dataGatherState cannot transition from Failed to Running" \ No newline at end of file diff --git a/vendor/github.com/openshift/api/insights/v1alpha1/types_insights.go b/vendor/github.com/openshift/api/insights/v1alpha1/types_insights.go deleted file mode 100644 index 397c9481b..000000000 --- a/vendor/github.com/openshift/api/insights/v1alpha1/types_insights.go +++ /dev/null @@ -1,255 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +kubebuilder:resource:scope=Cluster -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// -// DataGather provides data gather configuration options and status for the particular Insights data gathering. -// -// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. -// +openshift:compatibility-gen:level=4 -type DataGather struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration - // +kubebuilder:validation:Required - Spec DataGatherSpec `json:"spec"` - // status holds observed values from the cluster. They may not be overridden. - // +optional - Status DataGatherStatus `json:"status"` -} - -type DataGatherSpec struct { - // dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain - // in the Insights archive data. Valid values are "ClearText" and "ObfuscateNetworking". - // When set to ClearText the data is not obfuscated. - // When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. 
- // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. - // The current default is ClearText. - // +optional - DataPolicy DataPolicy `json:"dataPolicy"` - // gatherers is a list of gatherers configurations. - // The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. - // Run the following command to get the names of last active gatherers: - // "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" - // +optional - Gatherers []GathererConfig `json:"gatherers"` -} - -const ( - // No data obfuscation - NoPolicy DataPolicy = "ClearText" - // IP addresses and cluster domain name are obfuscated - ObfuscateNetworking DataPolicy = "ObfuscateNetworking" - // Data gathering is running - Running DataGatherState = "Running" - // Data gathering is completed - Completed DataGatherState = "Completed" - // Data gathering failed - Failed DataGatherState = "Failed" - // Data gathering is pending - Pending DataGatherState = "Pending" - // Gatherer state marked as disabled, which means that the gatherer will not run. - Disabled GathererState = "Disabled" - // Gatherer state marked as enabled, which means that the gatherer will run. - Enabled GathererState = "Enabled" -) - -// dataPolicy declares valid data policy types -// +kubebuilder:validation:Enum="";ClearText;ObfuscateNetworking -type DataPolicy string - -// state declares valid gatherer state types. -// +kubebuilder:validation:Enum="";Enabled;Disabled -type GathererState string - -// gathererConfig allows to configure specific gatherers -type GathererConfig struct { - // name is the name of specific gatherer - // +kubebuilder:validation:Required - Name string `json:"name"` - // state allows you to configure specific gatherer. Valid values are "Enabled", "Disabled" and omitted. - // When omitted, this means no opinion and the platform is left to choose a reasonable default. - // The current default is Enabled. 
- // +optional - State GathererState `json:"state"` -} - -// dataGatherState declares valid gathering state types -// +kubebuilder:validation:Optional -// +kubebuilder:validation:Enum=Running;Completed;Failed;Pending -// +kubebuilder:validation:XValidation:rule="!(oldSelf == 'Running' && self == 'Pending')", message="dataGatherState cannot transition from Running to Pending" -// +kubebuilder:validation:XValidation:rule="!(oldSelf == 'Completed' && self == 'Pending')", message="dataGatherState cannot transition from Completed to Pending" -// +kubebuilder:validation:XValidation:rule="!(oldSelf == 'Failed' && self == 'Pending')", message="dataGatherState cannot transition from Failed to Pending" -// +kubebuilder:validation:XValidation:rule="!(oldSelf == 'Completed' && self == 'Running')", message="dataGatherState cannot transition from Completed to Running" -// +kubebuilder:validation:XValidation:rule="!(oldSelf == 'Failed' && self == 'Running')", message="dataGatherState cannot transition from Failed to Running" -type DataGatherState string - -// +kubebuilder:validation:XValidation:rule="(!has(oldSelf.insightsRequestID) || has(self.insightsRequestID))",message="cannot remove insightsRequestID attribute from status" -// +kubebuilder:validation:XValidation:rule="(!has(oldSelf.startTime) || has(self.startTime))",message="cannot remove startTime attribute from status" -// +kubebuilder:validation:XValidation:rule="(!has(oldSelf.finishTime) || has(self.finishTime))",message="cannot remove finishTime attribute from status" -// +kubebuilder:validation:XValidation:rule="(!has(oldSelf.dataGatherState) || has(self.dataGatherState))",message="cannot remove dataGatherState attribute from status" -// +kubebuilder:validation:Optional -type DataGatherStatus struct { - // conditions provide details on the status of the gatherer job. - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - // +optional - Conditions []metav1.Condition `json:"conditions" patchStrategy:"merge" patchMergeKey:"type"` - // dataGatherState reflects the current state of the data gathering process. - // +optional - State DataGatherState `json:"dataGatherState,omitempty"` - // gatherers is a list of active gatherers (and their statuses) in the last gathering. - // +listType=map - // +listMapKey=name - // +optional - Gatherers []GathererStatus `json:"gatherers,omitempty"` - // startTime is the time when Insights data gathering started. - // +kubebuilder:validation:Optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="startTime is immutable once set" - // +optional - StartTime metav1.Time `json:"startTime,omitempty"` - // finishTime is the time when Insights data gathering finished. - // +kubebuilder:validation:Optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="finishTime is immutable once set" - // +optional - FinishTime metav1.Time `json:"finishTime,omitempty"` - // relatedObjects is a list of resources which are useful when debugging or inspecting the data - // gathering Pod - // +optional - RelatedObjects []ObjectReference `json:"relatedObjects,omitempty"` - // insightsRequestID is an Insights request ID to track the status of the - // Insights analysis (in console.redhat.com processing pipeline) for the corresponding Insights data archive. 
- // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="insightsRequestID is immutable once set" - // +kubebuilder:validation:Optional - // +optional - InsightsRequestID string `json:"insightsRequestID,omitempty"` - // insightsReport provides general Insights analysis results. - // When omitted, this means no data gathering has taken place yet or the - // corresponding Insights analysis (identified by "insightsRequestID") is not available. - // +optional - InsightsReport InsightsReport `json:"insightsReport,omitempty"` -} - -// gathererStatus represents information about a particular -// data gatherer. -type GathererStatus struct { - // conditions provide details on the status of each gatherer. - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinItems=1 - Conditions []metav1.Condition `json:"conditions" patchStrategy:"merge" patchMergeKey:"type"` - // name is the name of the gatherer. - // +kubebuilder:validation:Required - // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:MinLength=5 - Name string `json:"name"` - // lastGatherDuration represents the time spent gathering. - // +kubebuilder:validation:Required - // +kubebuilder:validation:Type=string - // +kubebuilder:validation:Pattern="^([1-9][0-9]*(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$" - LastGatherDuration metav1.Duration `json:"lastGatherDuration"` -} - -// insightsReport provides Insights health check report based on the most -// recently sent Insights data. -type InsightsReport struct { - // downloadedAt is the time when the last Insights report was downloaded. - // An empty value means that there has not been any Insights report downloaded yet and - // it usually appears in disconnected clusters (or clusters when the Insights data gathering is disabled). - // +optional - DownloadedAt metav1.Time `json:"downloadedAt,omitempty"` - // healthChecks provides basic information about active Insights health checks - // in a cluster. - // +listType=atomic - // +optional - HealthChecks []HealthCheck `json:"healthChecks,omitempty"` - // uri provides the URL link from which the report was downloaded. - // +kubebuilder:validation:Pattern=`^https:\/\/\S+` - // +optional - URI string `json:"uri,omitempty"` -} - -// healthCheck represents an Insights health check attributes. -type HealthCheck struct { - // description provides basic description of the healtcheck. - // +kubebuilder:validation:Required - // +kubebuilder:validation:MaxLength=2048 - // +kubebuilder:validation:MinLength=10 - Description string `json:"description"` - // totalRisk of the healthcheck. Indicator of the total risk posed - // by the detected issue; combination of impact and likelihood. The values can be from 1 to 4, - // and the higher the number, the more important the issue. - // +kubebuilder:validation:Required - // +kubebuilder:validation:Minimum=1 - // +kubebuilder:validation:Maximum=4 - TotalRisk int32 `json:"totalRisk"` - // advisorURI provides the URL link to the Insights Advisor. - // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern=`^https:\/\/\S+` - AdvisorURI string `json:"advisorURI"` - // state determines what the current state of the health check is. - // Health check is enabled by default and can be disabled - // by the user in the Insights advisor user interface. 
- // +kubebuilder:validation:Required - State HealthCheckState `json:"state"` -} - -// healthCheckState provides information about the status of the -// health check (for example, the health check may be marked as disabled by the user). -// +kubebuilder:validation:Enum:=Enabled;Disabled -type HealthCheckState string - -const ( - // enabled marks the health check as enabled - HealthCheckEnabled HealthCheckState = "Enabled" - // disabled marks the health check as disabled - HealthCheckDisabled HealthCheckState = "Disabled" -) - -// ObjectReference contains enough information to let you inspect or modify the referred object. -type ObjectReference struct { - // group is the API Group of the Resource. - // Enter empty string for the core group. - // This value should consist of only lowercase alphanumeric characters, hyphens and periods. - // Example: "", "apps", "build.openshift.io", etc. - // +kubebuilder:validation:Pattern:="^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$" - // +kubebuilder:validation:Required - Group string `json:"group"` - // resource is the type that is being referenced. - // It is normally the plural form of the resource kind in lowercase. - // This value should consist of only lowercase alphanumeric characters and hyphens. - // Example: "deployments", "deploymentconfigs", "pods", etc. - // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern:="^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - Resource string `json:"resource"` - // name of the referent. - // +kubebuilder:validation:Required - Name string `json:"name"` - // namespace of the referent. - // +optional - Namespace string `json:"namespace,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DataGatherList is a collection of items -// -// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. -// +openshift:compatibility-gen:level=4 -type DataGatherList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - Items []DataGather `json:"items"` -} diff --git a/vendor/github.com/openshift/api/insights/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/insights/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 1025eb12f..000000000 --- a/vendor/github.com/openshift/api/insights/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,225 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DataGather) DeepCopyInto(out *DataGather) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataGather. -func (in *DataGather) DeepCopy() *DataGather { - if in == nil { - return nil - } - out := new(DataGather) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *DataGather) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DataGatherList) DeepCopyInto(out *DataGatherList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DataGather, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataGatherList. -func (in *DataGatherList) DeepCopy() *DataGatherList { - if in == nil { - return nil - } - out := new(DataGatherList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DataGatherList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DataGatherSpec) DeepCopyInto(out *DataGatherSpec) { - *out = *in - if in.Gatherers != nil { - in, out := &in.Gatherers, &out.Gatherers - *out = make([]GathererConfig, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataGatherSpec. -func (in *DataGatherSpec) DeepCopy() *DataGatherSpec { - if in == nil { - return nil - } - out := new(DataGatherSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DataGatherStatus) DeepCopyInto(out *DataGatherStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Gatherers != nil { - in, out := &in.Gatherers, &out.Gatherers - *out = make([]GathererStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.StartTime.DeepCopyInto(&out.StartTime) - in.FinishTime.DeepCopyInto(&out.FinishTime) - if in.RelatedObjects != nil { - in, out := &in.RelatedObjects, &out.RelatedObjects - *out = make([]ObjectReference, len(*in)) - copy(*out, *in) - } - in.InsightsReport.DeepCopyInto(&out.InsightsReport) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataGatherStatus. -func (in *DataGatherStatus) DeepCopy() *DataGatherStatus { - if in == nil { - return nil - } - out := new(DataGatherStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GathererConfig) DeepCopyInto(out *GathererConfig) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GathererConfig. -func (in *GathererConfig) DeepCopy() *GathererConfig { - if in == nil { - return nil - } - out := new(GathererConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GathererStatus) DeepCopyInto(out *GathererStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - out.LastGatherDuration = in.LastGatherDuration - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GathererStatus. -func (in *GathererStatus) DeepCopy() *GathererStatus { - if in == nil { - return nil - } - out := new(GathererStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HealthCheck) DeepCopyInto(out *HealthCheck) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheck. -func (in *HealthCheck) DeepCopy() *HealthCheck { - if in == nil { - return nil - } - out := new(HealthCheck) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InsightsReport) DeepCopyInto(out *InsightsReport) { - *out = *in - in.DownloadedAt.DeepCopyInto(&out.DownloadedAt) - if in.HealthChecks != nil { - in, out := &in.HealthChecks, &out.HealthChecks - *out = make([]HealthCheck, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsReport. -func (in *InsightsReport) DeepCopy() *InsightsReport { - if in == nil { - return nil - } - out := new(InsightsReport) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. -func (in *ObjectReference) DeepCopy() *ObjectReference { - if in == nil { - return nil - } - out := new(ObjectReference) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/openshift/api/insights/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/insights/v1alpha1/zz_generated.swagger_doc_generated.go deleted file mode 100644 index 524450e51..000000000 --- a/vendor/github.com/openshift/api/insights/v1alpha1/zz_generated.swagger_doc_generated.go +++ /dev/null @@ -1,112 +0,0 @@ -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_DataGather = map[string]string{ - "": "\n\nDataGather provides data gather configuration options and status for the particular Insights data gathering.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", -} - -func (DataGather) SwaggerDoc() map[string]string { - return map_DataGather -} - -var map_DataGatherList = map[string]string{ - "": "DataGatherList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", -} - -func (DataGatherList) SwaggerDoc() map[string]string { - return map_DataGatherList -} - -var map_DataGatherSpec = map[string]string{ - "dataPolicy": "dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain in the Insights archive data. Valid values are \"ClearText\" and \"ObfuscateNetworking\". When set to ClearText the data is not obfuscated. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is ClearText.", - "gatherers": "gatherers is a list of gatherers configurations. The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\"", -} - -func (DataGatherSpec) SwaggerDoc() map[string]string { - return map_DataGatherSpec -} - -var map_DataGatherStatus = map[string]string{ - "conditions": "conditions provide details on the status of the gatherer job.", - "dataGatherState": "dataGatherState reflects the current state of the data gathering process.", - "gatherers": "gatherers is a list of active gatherers (and their statuses) in the last gathering.", - "startTime": "startTime is the time when Insights data gathering started.", - "finishTime": "finishTime is the time when Insights data gathering finished.", - "relatedObjects": "relatedObjects is a list of resources which are useful when debugging or inspecting the data gathering Pod", - "insightsRequestID": "insightsRequestID is an Insights request ID to track the status of the Insights analysis (in console.redhat.com processing pipeline) for the corresponding Insights data archive.", - "insightsReport": "insightsReport provides general Insights analysis results. When omitted, this means no data gathering has taken place yet or the corresponding Insights analysis (identified by \"insightsRequestID\") is not available.", -} - -func (DataGatherStatus) SwaggerDoc() map[string]string { - return map_DataGatherStatus -} - -var map_GathererConfig = map[string]string{ - "": "gathererConfig allows to configure specific gatherers", - "name": "name is the name of specific gatherer", - "state": "state allows you to configure specific gatherer. Valid values are \"Enabled\", \"Disabled\" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default. 
The current default is Enabled.", -} - -func (GathererConfig) SwaggerDoc() map[string]string { - return map_GathererConfig -} - -var map_GathererStatus = map[string]string{ - "": "gathererStatus represents information about a particular data gatherer.", - "conditions": "conditions provide details on the status of each gatherer.", - "name": "name is the name of the gatherer.", - "lastGatherDuration": "lastGatherDuration represents the time spent gathering.", -} - -func (GathererStatus) SwaggerDoc() map[string]string { - return map_GathererStatus -} - -var map_HealthCheck = map[string]string{ - "": "healthCheck represents an Insights health check attributes.", - "description": "description provides basic description of the healtcheck.", - "totalRisk": "totalRisk of the healthcheck. Indicator of the total risk posed by the detected issue; combination of impact and likelihood. The values can be from 1 to 4, and the higher the number, the more important the issue.", - "advisorURI": "advisorURI provides the URL link to the Insights Advisor.", - "state": "state determines what the current state of the health check is. Health check is enabled by default and can be disabled by the user in the Insights advisor user interface.", -} - -func (HealthCheck) SwaggerDoc() map[string]string { - return map_HealthCheck -} - -var map_InsightsReport = map[string]string{ - "": "insightsReport provides Insights health check report based on the most recently sent Insights data.", - "downloadedAt": "downloadedAt is the time when the last Insights report was downloaded. An empty value means that there has not been any Insights report downloaded yet and it usually appears in disconnected clusters (or clusters when the Insights data gathering is disabled).", - "healthChecks": "healthChecks provides basic information about active Insights health checks in a cluster.", - "uri": "uri provides the URL link from which the report was downloaded.", -} - -func (InsightsReport) SwaggerDoc() map[string]string { - return map_InsightsReport -} - -var map_ObjectReference = map[string]string{ - "": "ObjectReference contains enough information to let you inspect or modify the referred object.", - "group": "group is the API Group of the Resource. Enter empty string for the core group. This value should consist of only lowercase alphanumeric characters, hyphens and periods. Example: \"\", \"apps\", \"build.openshift.io\", etc.", - "resource": "resource is the type that is being referenced. It is normally the plural form of the resource kind in lowercase. This value should consist of only lowercase alphanumeric characters and hyphens. Example: \"deployments\", \"deploymentconfigs\", \"pods\", etc.", - "name": "name of the referent.", - "namespace": "namespace of the referent.", -} - -func (ObjectReference) SwaggerDoc() map[string]string { - return map_ObjectReference -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/datagather.go b/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/datagather.go deleted file mode 100644 index 876df7750..000000000 --- a/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/datagather.go +++ /dev/null @@ -1,240 +0,0 @@ -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - insightsv1alpha1 "github.com/openshift/api/insights/v1alpha1" - internal "github.com/openshift/client-go/insights/applyconfigurations/internal" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - managedfields "k8s.io/apimachinery/pkg/util/managedfields" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// DataGatherApplyConfiguration represents an declarative configuration of the DataGather type for use -// with apply. -type DataGatherApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *DataGatherSpecApplyConfiguration `json:"spec,omitempty"` - Status *DataGatherStatusApplyConfiguration `json:"status,omitempty"` -} - -// DataGather constructs an declarative configuration of the DataGather type for use with -// apply. -func DataGather(name string) *DataGatherApplyConfiguration { - b := &DataGatherApplyConfiguration{} - b.WithName(name) - b.WithKind("DataGather") - b.WithAPIVersion("insights.openshift.io/v1alpha1") - return b -} - -// ExtractDataGather extracts the applied configuration owned by fieldManager from -// dataGather. If no managedFields are found in dataGather for fieldManager, a -// DataGatherApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. It is possible that no managed fields were found for because other -// field managers have taken ownership of all the fields previously owned by fieldManager, or because -// the fieldManager never owned fields any fields. -// dataGather must be a unmodified DataGather API object that was retrieved from the Kubernetes API. -// ExtractDataGather provides a way to perform a extract/modify-in-place/apply workflow. -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously -// applied if another fieldManager has updated or force applied any of the previously applied fields. -// Experimental! -func ExtractDataGather(dataGather *insightsv1alpha1.DataGather, fieldManager string) (*DataGatherApplyConfiguration, error) { - return extractDataGather(dataGather, fieldManager, "") -} - -// ExtractDataGatherStatus is the same as ExtractDataGather except -// that it extracts the status subresource applied configuration. -// Experimental! -func ExtractDataGatherStatus(dataGather *insightsv1alpha1.DataGather, fieldManager string) (*DataGatherApplyConfiguration, error) { - return extractDataGather(dataGather, fieldManager, "status") -} - -func extractDataGather(dataGather *insightsv1alpha1.DataGather, fieldManager string, subresource string) (*DataGatherApplyConfiguration, error) { - b := &DataGatherApplyConfiguration{} - err := managedfields.ExtractInto(dataGather, internal.Parser().Type("com.github.openshift.api.insights.v1alpha1.DataGather"), fieldManager, b, subresource) - if err != nil { - return nil, err - } - b.WithName(dataGather.Name) - - b.WithKind("DataGather") - b.WithAPIVersion("insights.openshift.io/v1alpha1") - return b, nil -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. 
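
The ExtractDataGather comment above describes an extract / modify-in-place / apply workflow. Below is a hedged sketch of that round trip; it assumes the removed client packages are still vendored and that the typed client (deleted further down in this diff) carries the usual client-gen Apply method. The field-manager name and label are illustrative.

package example

import (
    "context"

    apiv1alpha1 "github.com/openshift/api/insights/v1alpha1"
    applyv1alpha1 "github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1"
    versioned "github.com/openshift/client-go/insights/clientset/versioned"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// relabel extracts the fields owned by fieldManager from a live DataGather,
// changes them in place, and re-applies the result with server-side apply.
func relabel(ctx context.Context, client versioned.Interface, live *apiv1alpha1.DataGather) error {
    const fieldManager = "insights-example" // illustrative field-manager name

    applyCfg, err := applyv1alpha1.ExtractDataGather(live, fieldManager)
    if err != nil {
        return err
    }

    // Modify the extracted configuration in place.
    applyCfg.WithLabels(map[string]string{"example.openshift.io/reviewed": "true"})

    // Re-apply through the generated typed client (assumed standard client-gen Apply).
    _, err = client.InsightsV1alpha1().DataGathers().Apply(ctx, applyCfg,
        metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
    return err
}
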
-func (b *DataGatherApplyConfiguration) WithKind(value string) *DataGatherApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *DataGatherApplyConfiguration) WithAPIVersion(value string) *DataGatherApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *DataGatherApplyConfiguration) WithName(value string) *DataGatherApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *DataGatherApplyConfiguration) WithGenerateName(value string) *DataGatherApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *DataGatherApplyConfiguration) WithNamespace(value string) *DataGatherApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *DataGatherApplyConfiguration) WithUID(value types.UID) *DataGatherApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *DataGatherApplyConfiguration) WithResourceVersion(value string) *DataGatherApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. 
-func (b *DataGatherApplyConfiguration) WithGeneration(value int64) *DataGatherApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *DataGatherApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DataGatherApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *DataGatherApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DataGatherApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *DataGatherApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DataGatherApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *DataGatherApplyConfiguration) WithLabels(entries map[string]string) *DataGatherApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *DataGatherApplyConfiguration) WithAnnotations(entries map[string]string) *DataGatherApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *DataGatherApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DataGatherApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *DataGatherApplyConfiguration) WithFinalizers(values ...string) *DataGatherApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *DataGatherApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithSpec sets the Spec field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Spec field is set to the value of the last call. -func (b *DataGatherApplyConfiguration) WithSpec(value *DataGatherSpecApplyConfiguration) *DataGatherApplyConfiguration { - b.Spec = value - return b -} - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *DataGatherApplyConfiguration) WithStatus(value *DataGatherStatusApplyConfiguration) *DataGatherApplyConfiguration { - b.Status = value - return b -} diff --git a/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/datagatherspec.go b/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/datagatherspec.go deleted file mode 100644 index aa9a85d71..000000000 --- a/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/datagatherspec.go +++ /dev/null @@ -1,41 +0,0 @@ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/api/insights/v1alpha1" -) - -// DataGatherSpecApplyConfiguration represents an declarative configuration of the DataGatherSpec type for use -// with apply. -type DataGatherSpecApplyConfiguration struct { - DataPolicy *v1alpha1.DataPolicy `json:"dataPolicy,omitempty"` - Gatherers []GathererConfigApplyConfiguration `json:"gatherers,omitempty"` -} - -// DataGatherSpecApplyConfiguration constructs an declarative configuration of the DataGatherSpec type for use with -// apply. -func DataGatherSpec() *DataGatherSpecApplyConfiguration { - return &DataGatherSpecApplyConfiguration{} -} - -// WithDataPolicy sets the DataPolicy field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DataPolicy field is set to the value of the last call. 
-func (b *DataGatherSpecApplyConfiguration) WithDataPolicy(value v1alpha1.DataPolicy) *DataGatherSpecApplyConfiguration { - b.DataPolicy = &value - return b -} - -// WithGatherers adds the given value to the Gatherers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Gatherers field. -func (b *DataGatherSpecApplyConfiguration) WithGatherers(values ...*GathererConfigApplyConfiguration) *DataGatherSpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithGatherers") - } - b.Gatherers = append(b.Gatherers, *values[i]) - } - return b -} diff --git a/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/datagatherstatus.go b/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/datagatherstatus.go deleted file mode 100644 index bdeb15ef4..000000000 --- a/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/datagatherstatus.go +++ /dev/null @@ -1,103 +0,0 @@ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/api/insights/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// DataGatherStatusApplyConfiguration represents an declarative configuration of the DataGatherStatus type for use -// with apply. -type DataGatherStatusApplyConfiguration struct { - Conditions []v1.Condition `json:"conditions,omitempty"` - State *v1alpha1.DataGatherState `json:"dataGatherState,omitempty"` - Gatherers []GathererStatusApplyConfiguration `json:"gatherers,omitempty"` - StartTime *v1.Time `json:"startTime,omitempty"` - FinishTime *v1.Time `json:"finishTime,omitempty"` - RelatedObjects []ObjectReferenceApplyConfiguration `json:"relatedObjects,omitempty"` - InsightsRequestID *string `json:"insightsRequestID,omitempty"` - InsightsReport *InsightsReportApplyConfiguration `json:"insightsReport,omitempty"` -} - -// DataGatherStatusApplyConfiguration constructs an declarative configuration of the DataGatherStatus type for use with -// apply. -func DataGatherStatus() *DataGatherStatusApplyConfiguration { - return &DataGatherStatusApplyConfiguration{} -} - -// WithConditions adds the given value to the Conditions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *DataGatherStatusApplyConfiguration) WithConditions(values ...v1.Condition) *DataGatherStatusApplyConfiguration { - for i := range values { - b.Conditions = append(b.Conditions, values[i]) - } - return b -} - -// WithState sets the State field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the State field is set to the value of the last call. -func (b *DataGatherStatusApplyConfiguration) WithState(value v1alpha1.DataGatherState) *DataGatherStatusApplyConfiguration { - b.State = &value - return b -} - -// WithGatherers adds the given value to the Gatherers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the Gatherers field. -func (b *DataGatherStatusApplyConfiguration) WithGatherers(values ...*GathererStatusApplyConfiguration) *DataGatherStatusApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithGatherers") - } - b.Gatherers = append(b.Gatherers, *values[i]) - } - return b -} - -// WithStartTime sets the StartTime field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StartTime field is set to the value of the last call. -func (b *DataGatherStatusApplyConfiguration) WithStartTime(value v1.Time) *DataGatherStatusApplyConfiguration { - b.StartTime = &value - return b -} - -// WithFinishTime sets the FinishTime field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the FinishTime field is set to the value of the last call. -func (b *DataGatherStatusApplyConfiguration) WithFinishTime(value v1.Time) *DataGatherStatusApplyConfiguration { - b.FinishTime = &value - return b -} - -// WithRelatedObjects adds the given value to the RelatedObjects field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the RelatedObjects field. -func (b *DataGatherStatusApplyConfiguration) WithRelatedObjects(values ...*ObjectReferenceApplyConfiguration) *DataGatherStatusApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRelatedObjects") - } - b.RelatedObjects = append(b.RelatedObjects, *values[i]) - } - return b -} - -// WithInsightsRequestID sets the InsightsRequestID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the InsightsRequestID field is set to the value of the last call. -func (b *DataGatherStatusApplyConfiguration) WithInsightsRequestID(value string) *DataGatherStatusApplyConfiguration { - b.InsightsRequestID = &value - return b -} - -// WithInsightsReport sets the InsightsReport field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the InsightsReport field is set to the value of the last call. -func (b *DataGatherStatusApplyConfiguration) WithInsightsReport(value *InsightsReportApplyConfiguration) *DataGatherStatusApplyConfiguration { - b.InsightsReport = value - return b -} diff --git a/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/gathererconfig.go b/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/gathererconfig.go deleted file mode 100644 index 8f7b05f1a..000000000 --- a/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/gathererconfig.go +++ /dev/null @@ -1,36 +0,0 @@ -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
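
The individual "With" builders above are designed to be chained into a single apply configuration. A short sketch follows, assuming the removed apply-configuration package is still vendored; the resource name and label are placeholders, and "ObfuscateNetworking" is one of the documented dataPolicy values.

package example

import (
    apiv1alpha1 "github.com/openshift/api/insights/v1alpha1"
    applyv1alpha1 "github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1"
)

// newOnDemandGather assembles a DataGather apply configuration by chaining the
// generated "With" builders; DataGather(name) already fills in Kind and APIVersion.
func newOnDemandGather() *applyv1alpha1.DataGatherApplyConfiguration {
    // The name and label below are illustrative placeholders.
    return applyv1alpha1.DataGather("on-demand-example").
        WithLabels(map[string]string{"example.openshift.io/trigger": "manual"}).
        WithSpec(applyv1alpha1.DataGatherSpec().
            WithDataPolicy(apiv1alpha1.DataPolicy("ObfuscateNetworking")))
}
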
- -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/api/insights/v1alpha1" -) - -// GathererConfigApplyConfiguration represents an declarative configuration of the GathererConfig type for use -// with apply. -type GathererConfigApplyConfiguration struct { - Name *string `json:"name,omitempty"` - State *v1alpha1.GathererState `json:"state,omitempty"` -} - -// GathererConfigApplyConfiguration constructs an declarative configuration of the GathererConfig type for use with -// apply. -func GathererConfig() *GathererConfigApplyConfiguration { - return &GathererConfigApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *GathererConfigApplyConfiguration) WithName(value string) *GathererConfigApplyConfiguration { - b.Name = &value - return b -} - -// WithState sets the State field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the State field is set to the value of the last call. -func (b *GathererConfigApplyConfiguration) WithState(value v1alpha1.GathererState) *GathererConfigApplyConfiguration { - b.State = &value - return b -} diff --git a/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/gathererstatus.go b/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/gathererstatus.go deleted file mode 100644 index 6bd23bfc5..000000000 --- a/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/gathererstatus.go +++ /dev/null @@ -1,47 +0,0 @@ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// GathererStatusApplyConfiguration represents an declarative configuration of the GathererStatus type for use -// with apply. -type GathererStatusApplyConfiguration struct { - Conditions []v1.Condition `json:"conditions,omitempty"` - Name *string `json:"name,omitempty"` - LastGatherDuration *v1.Duration `json:"lastGatherDuration,omitempty"` -} - -// GathererStatusApplyConfiguration constructs an declarative configuration of the GathererStatus type for use with -// apply. -func GathererStatus() *GathererStatusApplyConfiguration { - return &GathererStatusApplyConfiguration{} -} - -// WithConditions adds the given value to the Conditions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *GathererStatusApplyConfiguration) WithConditions(values ...v1.Condition) *GathererStatusApplyConfiguration { - for i := range values { - b.Conditions = append(b.Conditions, values[i]) - } - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. 
-func (b *GathererStatusApplyConfiguration) WithName(value string) *GathererStatusApplyConfiguration { - b.Name = &value - return b -} - -// WithLastGatherDuration sets the LastGatherDuration field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the LastGatherDuration field is set to the value of the last call. -func (b *GathererStatusApplyConfiguration) WithLastGatherDuration(value v1.Duration) *GathererStatusApplyConfiguration { - b.LastGatherDuration = &value - return b -} diff --git a/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/healthcheck.go b/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/healthcheck.go deleted file mode 100644 index 4d72ec58a..000000000 --- a/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/healthcheck.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/openshift/api/insights/v1alpha1" -) - -// HealthCheckApplyConfiguration represents an declarative configuration of the HealthCheck type for use -// with apply. -type HealthCheckApplyConfiguration struct { - Description *string `json:"description,omitempty"` - TotalRisk *int32 `json:"totalRisk,omitempty"` - AdvisorURI *string `json:"advisorURI,omitempty"` - State *v1alpha1.HealthCheckState `json:"state,omitempty"` -} - -// HealthCheckApplyConfiguration constructs an declarative configuration of the HealthCheck type for use with -// apply. -func HealthCheck() *HealthCheckApplyConfiguration { - return &HealthCheckApplyConfiguration{} -} - -// WithDescription sets the Description field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Description field is set to the value of the last call. -func (b *HealthCheckApplyConfiguration) WithDescription(value string) *HealthCheckApplyConfiguration { - b.Description = &value - return b -} - -// WithTotalRisk sets the TotalRisk field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the TotalRisk field is set to the value of the last call. -func (b *HealthCheckApplyConfiguration) WithTotalRisk(value int32) *HealthCheckApplyConfiguration { - b.TotalRisk = &value - return b -} - -// WithAdvisorURI sets the AdvisorURI field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AdvisorURI field is set to the value of the last call. -func (b *HealthCheckApplyConfiguration) WithAdvisorURI(value string) *HealthCheckApplyConfiguration { - b.AdvisorURI = &value - return b -} - -// WithState sets the State field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the State field is set to the value of the last call. 
-func (b *HealthCheckApplyConfiguration) WithState(value v1alpha1.HealthCheckState) *HealthCheckApplyConfiguration { - b.State = &value - return b -} diff --git a/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/insightsreport.go b/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/insightsreport.go deleted file mode 100644 index e86f64ebf..000000000 --- a/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/insightsreport.go +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// InsightsReportApplyConfiguration represents an declarative configuration of the InsightsReport type for use -// with apply. -type InsightsReportApplyConfiguration struct { - DownloadedAt *v1.Time `json:"downloadedAt,omitempty"` - HealthChecks []HealthCheckApplyConfiguration `json:"healthChecks,omitempty"` - URI *string `json:"uri,omitempty"` -} - -// InsightsReportApplyConfiguration constructs an declarative configuration of the InsightsReport type for use with -// apply. -func InsightsReport() *InsightsReportApplyConfiguration { - return &InsightsReportApplyConfiguration{} -} - -// WithDownloadedAt sets the DownloadedAt field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DownloadedAt field is set to the value of the last call. -func (b *InsightsReportApplyConfiguration) WithDownloadedAt(value v1.Time) *InsightsReportApplyConfiguration { - b.DownloadedAt = &value - return b -} - -// WithHealthChecks adds the given value to the HealthChecks field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the HealthChecks field. -func (b *InsightsReportApplyConfiguration) WithHealthChecks(values ...*HealthCheckApplyConfiguration) *InsightsReportApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithHealthChecks") - } - b.HealthChecks = append(b.HealthChecks, *values[i]) - } - return b -} - -// WithURI sets the URI field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the URI field is set to the value of the last call. -func (b *InsightsReportApplyConfiguration) WithURI(value string) *InsightsReportApplyConfiguration { - b.URI = &value - return b -} diff --git a/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/objectreference.go b/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/objectreference.go deleted file mode 100644 index 2cf5ed707..000000000 --- a/vendor/github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1/objectreference.go +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// ObjectReferenceApplyConfiguration represents an declarative configuration of the ObjectReference type for use -// with apply. 
-type ObjectReferenceApplyConfiguration struct { - Group *string `json:"group,omitempty"` - Resource *string `json:"resource,omitempty"` - Name *string `json:"name,omitempty"` - Namespace *string `json:"namespace,omitempty"` -} - -// ObjectReferenceApplyConfiguration constructs an declarative configuration of the ObjectReference type for use with -// apply. -func ObjectReference() *ObjectReferenceApplyConfiguration { - return &ObjectReferenceApplyConfiguration{} -} - -// WithGroup sets the Group field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Group field is set to the value of the last call. -func (b *ObjectReferenceApplyConfiguration) WithGroup(value string) *ObjectReferenceApplyConfiguration { - b.Group = &value - return b -} - -// WithResource sets the Resource field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Resource field is set to the value of the last call. -func (b *ObjectReferenceApplyConfiguration) WithResource(value string) *ObjectReferenceApplyConfiguration { - b.Resource = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ObjectReferenceApplyConfiguration) WithName(value string) *ObjectReferenceApplyConfiguration { - b.Name = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *ObjectReferenceApplyConfiguration) WithNamespace(value string) *ObjectReferenceApplyConfiguration { - b.Namespace = &value - return b -} diff --git a/vendor/github.com/openshift/client-go/insights/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/insights/applyconfigurations/internal/internal.go deleted file mode 100644 index b68a54174..000000000 --- a/vendor/github.com/openshift/client-go/insights/applyconfigurations/internal/internal.go +++ /dev/null @@ -1,362 +0,0 @@ -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
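
The same chaining style applies to the status-side builders deleted above (DataGatherStatus, GathererStatus, InsightsReport, HealthCheck). A hedged sketch using only constructors present in this diff; the gatherer name, duration, and health-check fields are invented placeholders.

package example

import (
    "time"

    apiv1alpha1 "github.com/openshift/api/insights/v1alpha1"
    applyv1alpha1 "github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleStatus chains the status-side builders removed in this diff;
// all field values are illustrative only.
func exampleStatus() *applyv1alpha1.DataGatherStatusApplyConfiguration {
    return applyv1alpha1.DataGatherStatus().
        WithStartTime(metav1.Now()).
        WithGatherers(applyv1alpha1.GathererStatus().
            WithName("example/gatherer").
            WithLastGatherDuration(metav1.Duration{Duration: 3 * time.Second})).
        WithInsightsReport(applyv1alpha1.InsightsReport().
            WithDownloadedAt(metav1.Now()).
            WithHealthChecks(applyv1alpha1.HealthCheck().
                WithDescription("Placeholder description of a detected issue").
                WithTotalRisk(1).
                WithAdvisorURI("https://example.invalid/advisor").
                WithState(apiv1alpha1.HealthCheckEnabled)))
}
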
- -package internal - -import ( - "fmt" - "sync" - - typed "sigs.k8s.io/structured-merge-diff/v4/typed" -) - -func Parser() *typed.Parser { - parserOnce.Do(func() { - var err error - parser, err = typed.NewParser(schemaYAML) - if err != nil { - panic(fmt.Sprintf("Failed to parse schema: %v", err)) - } - }) - return parser -} - -var parserOnce sync.Once -var parser *typed.Parser -var schemaYAML = typed.YAMLObject(`types: -- name: com.github.openshift.api.insights.v1alpha1.DataGather - map: - fields: - - name: apiVersion - type: - scalar: string - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec - type: - namedType: com.github.openshift.api.insights.v1alpha1.DataGatherSpec - default: {} - - name: status - type: - namedType: com.github.openshift.api.insights.v1alpha1.DataGatherStatus - default: {} -- name: com.github.openshift.api.insights.v1alpha1.DataGatherSpec - map: - fields: - - name: dataPolicy - type: - scalar: string - default: "" - - name: gatherers - type: - list: - elementType: - namedType: com.github.openshift.api.insights.v1alpha1.GathererConfig - elementRelationship: atomic -- name: com.github.openshift.api.insights.v1alpha1.DataGatherStatus - map: - fields: - - name: conditions - type: - list: - elementType: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition - elementRelationship: associative - keys: - - type - - name: dataGatherState - type: - scalar: string - - name: finishTime - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - - name: gatherers - type: - list: - elementType: - namedType: com.github.openshift.api.insights.v1alpha1.GathererStatus - elementRelationship: associative - keys: - - name - - name: insightsReport - type: - namedType: com.github.openshift.api.insights.v1alpha1.InsightsReport - default: {} - - name: insightsRequestID - type: - scalar: string - - name: relatedObjects - type: - list: - elementType: - namedType: com.github.openshift.api.insights.v1alpha1.ObjectReference - elementRelationship: atomic - - name: startTime - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} -- name: com.github.openshift.api.insights.v1alpha1.GathererConfig - map: - fields: - - name: name - type: - scalar: string - default: "" - - name: state - type: - scalar: string - default: "" -- name: com.github.openshift.api.insights.v1alpha1.GathererStatus - map: - fields: - - name: conditions - type: - list: - elementType: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition - elementRelationship: associative - keys: - - type - - name: lastGatherDuration - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration - default: 0 - - name: name - type: - scalar: string - default: "" -- name: com.github.openshift.api.insights.v1alpha1.HealthCheck - map: - fields: - - name: advisorURI - type: - scalar: string - default: "" - - name: description - type: - scalar: string - default: "" - - name: state - type: - scalar: string - default: "" - - name: totalRisk - type: - scalar: numeric - default: 0 -- name: com.github.openshift.api.insights.v1alpha1.InsightsReport - map: - fields: - - name: downloadedAt - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - - name: healthChecks - type: - list: - elementType: - namedType: com.github.openshift.api.insights.v1alpha1.HealthCheck - elementRelationship: atomic - - name: uri - type: - scalar: string -- name: 
com.github.openshift.api.insights.v1alpha1.ObjectReference - map: - fields: - - name: group - type: - scalar: string - default: "" - - name: name - type: - scalar: string - default: "" - - name: namespace - type: - scalar: string - - name: resource - type: - scalar: string - default: "" -- name: io.k8s.apimachinery.pkg.apis.meta.v1.Condition - map: - fields: - - name: lastTransitionTime - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - - name: message - type: - scalar: string - default: "" - - name: observedGeneration - type: - scalar: numeric - - name: reason - type: - scalar: string - default: "" - - name: status - type: - scalar: string - default: "" - - name: type - type: - scalar: string - default: "" -- name: io.k8s.apimachinery.pkg.apis.meta.v1.Duration - scalar: string -- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 - map: - elementType: - scalar: untyped - list: - elementType: - namedType: __untyped_atomic_ - elementRelationship: atomic - map: - elementType: - namedType: __untyped_deduced_ - elementRelationship: separable -- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry - map: - fields: - - name: apiVersion - type: - scalar: string - - name: fieldsType - type: - scalar: string - - name: fieldsV1 - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 - - name: manager - type: - scalar: string - - name: operation - type: - scalar: string - - name: subresource - type: - scalar: string - - name: time - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time -- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - map: - fields: - - name: annotations - type: - map: - elementType: - scalar: string - - name: creationTimestamp - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - - name: deletionGracePeriodSeconds - type: - scalar: numeric - - name: deletionTimestamp - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - - name: finalizers - type: - list: - elementType: - scalar: string - elementRelationship: associative - - name: generateName - type: - scalar: string - - name: generation - type: - scalar: numeric - - name: labels - type: - map: - elementType: - scalar: string - - name: managedFields - type: - list: - elementType: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry - elementRelationship: atomic - - name: name - type: - scalar: string - - name: namespace - type: - scalar: string - - name: ownerReferences - type: - list: - elementType: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference - elementRelationship: associative - keys: - - uid - - name: resourceVersion - type: - scalar: string - - name: selfLink - type: - scalar: string - - name: uid - type: - scalar: string -- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference - map: - fields: - - name: apiVersion - type: - scalar: string - default: "" - - name: blockOwnerDeletion - type: - scalar: boolean - - name: controller - type: - scalar: boolean - - name: kind - type: - scalar: string - default: "" - - name: name - type: - scalar: string - default: "" - - name: uid - type: - scalar: string - default: "" - elementRelationship: atomic -- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time - scalar: untyped -- name: __untyped_atomic_ - scalar: untyped - list: - elementType: - namedType: __untyped_atomic_ - elementRelationship: atomic - map: - elementType: - namedType: __untyped_atomic_ - elementRelationship: atomic -- name: __untyped_deduced_ - scalar: untyped - list: - elementType: - 
namedType: __untyped_atomic_ - elementRelationship: atomic - map: - elementType: - namedType: __untyped_deduced_ - elementRelationship: separable -`) diff --git a/vendor/github.com/openshift/client-go/insights/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/insights/clientset/versioned/clientset.go deleted file mode 100644 index 1fac36c30..000000000 --- a/vendor/github.com/openshift/client-go/insights/clientset/versioned/clientset.go +++ /dev/null @@ -1,105 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package versioned - -import ( - "fmt" - "net/http" - - insightsv1alpha1 "github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1" - discovery "k8s.io/client-go/discovery" - rest "k8s.io/client-go/rest" - flowcontrol "k8s.io/client-go/util/flowcontrol" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - InsightsV1alpha1() insightsv1alpha1.InsightsV1alpha1Interface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. -type Clientset struct { - *discovery.DiscoveryClient - insightsV1alpha1 *insightsv1alpha1.InsightsV1alpha1Client -} - -// InsightsV1alpha1 retrieves the InsightsV1alpha1Client -func (c *Clientset) InsightsV1alpha1() insightsv1alpha1.InsightsV1alpha1Interface { - return c.insightsV1alpha1 -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - if c == nil { - return nil - } - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. -// If config's RateLimiter is not set and QPS and Burst are acceptable, -// NewForConfig will generate a rate-limiter in configShallowCopy. -// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), -// where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*Clientset, error) { - configShallowCopy := *c - - if configShallowCopy.UserAgent == "" { - configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() - } - - // share the transport between all clients - httpClient, err := rest.HTTPClientFor(&configShallowCopy) - if err != nil { - return nil, err - } - - return NewForConfigAndClient(&configShallowCopy, httpClient) -} - -// NewForConfigAndClient creates a new Clientset for the given config and http client. -// Note the http client provided takes precedence over the configured transport values. -// If config's RateLimiter is not set and QPS and Burst are acceptable, -// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. 
-func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - if configShallowCopy.Burst <= 0 { - return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") - } - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - - var cs Clientset - var err error - cs.insightsV1alpha1, err = insightsv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) - if err != nil { - return nil, err - } - - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) - if err != nil { - return nil, err - } - return &cs, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *Clientset { - cs, err := NewForConfig(c) - if err != nil { - panic(err) - } - return cs -} - -// New creates a new Clientset for the given RESTClient. -func New(c rest.Interface) *Clientset { - var cs Clientset - cs.insightsV1alpha1 = insightsv1alpha1.New(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &cs -} diff --git a/vendor/github.com/openshift/client-go/insights/clientset/versioned/doc.go b/vendor/github.com/openshift/client-go/insights/clientset/versioned/doc.go deleted file mode 100644 index 0e0c2a890..000000000 --- a/vendor/github.com/openshift/client-go/insights/clientset/versioned/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. -package versioned diff --git a/vendor/github.com/openshift/client-go/insights/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/insights/clientset/versioned/fake/clientset_generated.go deleted file mode 100644 index b58c3c0bf..000000000 --- a/vendor/github.com/openshift/client-go/insights/clientset/versioned/fake/clientset_generated.go +++ /dev/null @@ -1,69 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - clientset "github.com/openshift/client-go/insights/clientset/versioned" - insightsv1alpha1 "github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1" - fakeinsightsv1alpha1 "github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/fake" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/discovery" - fakediscovery "k8s.io/client-go/discovery/fake" - "k8s.io/client-go/testing" -) - -// NewSimpleClientset returns a clientset that will respond with the provided objects. -// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement -// for a real clientset and is mostly useful in simple unit tests. 
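
A hedged sketch of constructing the versioned clientset shown above from a kubeconfig and listing DataGather resources. The kubeconfig path is a placeholder, and List is assumed to be the standard client-gen method on the typed client removed later in this diff.

package main

import (
    "context"
    "fmt"

    "github.com/openshift/client-go/insights/clientset/versioned"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Illustrative kubeconfig path; in-cluster code would use rest.InClusterConfig() instead.
    cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
    if err != nil {
        panic(err)
    }

    // NewForConfig wires the shared HTTP transport and rate limiter as shown above.
    client, err := versioned.NewForConfig(cfg)
    if err != nil {
        panic(err)
    }

    // No namespace is passed because DataGather is a cluster-scoped resource.
    list, err := client.InsightsV1alpha1().DataGathers().List(context.Background(), metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    for _, dg := range list.Items {
        fmt.Println(dg.Name)
    }
}
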
-func NewSimpleClientset(objects ...runtime.Object) *Clientset { - o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) - for _, obj := range objects { - if err := o.Add(obj); err != nil { - panic(err) - } - } - - cs := &Clientset{tracker: o} - cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} - cs.AddReactor("*", "*", testing.ObjectReaction(o)) - cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { - gvr := action.GetResource() - ns := action.GetNamespace() - watch, err := o.Watch(gvr, ns) - if err != nil { - return false, nil, err - } - return true, watch, nil - }) - - return cs -} - -// Clientset implements clientset.Interface. Meant to be embedded into a -// struct to get a default implementation. This makes faking out just the method -// you want to test easier. -type Clientset struct { - testing.Fake - discovery *fakediscovery.FakeDiscovery - tracker testing.ObjectTracker -} - -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - return c.discovery -} - -func (c *Clientset) Tracker() testing.ObjectTracker { - return c.tracker -} - -var ( - _ clientset.Interface = &Clientset{} - _ testing.FakeClient = &Clientset{} -) - -// InsightsV1alpha1 retrieves the InsightsV1alpha1Client -func (c *Clientset) InsightsV1alpha1() insightsv1alpha1.InsightsV1alpha1Interface { - return &fakeinsightsv1alpha1.FakeInsightsV1alpha1{Fake: &c.Fake} -} diff --git a/vendor/github.com/openshift/client-go/insights/clientset/versioned/fake/doc.go b/vendor/github.com/openshift/client-go/insights/clientset/versioned/fake/doc.go deleted file mode 100644 index 3630ed1cd..000000000 --- a/vendor/github.com/openshift/client-go/insights/clientset/versioned/fake/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated fake clientset. -package fake diff --git a/vendor/github.com/openshift/client-go/insights/clientset/versioned/fake/register.go b/vendor/github.com/openshift/client-go/insights/clientset/versioned/fake/register.go deleted file mode 100644 index 640621b06..000000000 --- a/vendor/github.com/openshift/client-go/insights/clientset/versioned/fake/register.go +++ /dev/null @@ -1,40 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - insightsv1alpha1 "github.com/openshift/api/insights/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" -) - -var scheme = runtime.NewScheme() -var codecs = serializer.NewCodecFactory(scheme) - -var localSchemeBuilder = runtime.SchemeBuilder{ - insightsv1alpha1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. 
-var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(scheme)) -} diff --git a/vendor/github.com/openshift/client-go/insights/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/insights/clientset/versioned/scheme/doc.go deleted file mode 100644 index 14db57a58..000000000 --- a/vendor/github.com/openshift/client-go/insights/clientset/versioned/scheme/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -// This package contains the scheme of the automatically generated clientset. -package scheme diff --git a/vendor/github.com/openshift/client-go/insights/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/insights/clientset/versioned/scheme/register.go deleted file mode 100644 index b66416e75..000000000 --- a/vendor/github.com/openshift/client-go/insights/clientset/versioned/scheme/register.go +++ /dev/null @@ -1,40 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package scheme - -import ( - insightsv1alpha1 "github.com/openshift/api/insights/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" -) - -var Scheme = runtime.NewScheme() -var Codecs = serializer.NewCodecFactory(Scheme) -var ParameterCodec = runtime.NewParameterCodec(Scheme) -var localSchemeBuilder = runtime.SchemeBuilder{ - insightsv1alpha1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. -var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(Scheme)) -} diff --git a/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/datagather.go b/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/datagather.go deleted file mode 100644 index 39164ab75..000000000 --- a/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/datagather.go +++ /dev/null @@ -1,227 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha1 "github.com/openshift/api/insights/v1alpha1" - insightsv1alpha1 "github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1" - scheme "github.com/openshift/client-go/insights/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// DataGathersGetter has a method to return a DataGatherInterface. -// A group's client should implement this interface. 
-type DataGathersGetter interface { - DataGathers() DataGatherInterface -} - -// DataGatherInterface has methods to work with DataGather resources. -type DataGatherInterface interface { - Create(ctx context.Context, dataGather *v1alpha1.DataGather, opts v1.CreateOptions) (*v1alpha1.DataGather, error) - Update(ctx context.Context, dataGather *v1alpha1.DataGather, opts v1.UpdateOptions) (*v1alpha1.DataGather, error) - UpdateStatus(ctx context.Context, dataGather *v1alpha1.DataGather, opts v1.UpdateOptions) (*v1alpha1.DataGather, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.DataGather, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.DataGatherList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.DataGather, err error) - Apply(ctx context.Context, dataGather *insightsv1alpha1.DataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.DataGather, err error) - ApplyStatus(ctx context.Context, dataGather *insightsv1alpha1.DataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.DataGather, err error) - DataGatherExpansion -} - -// dataGathers implements DataGatherInterface -type dataGathers struct { - client rest.Interface -} - -// newDataGathers returns a DataGathers -func newDataGathers(c *InsightsV1alpha1Client) *dataGathers { - return &dataGathers{ - client: c.RESTClient(), - } -} - -// Get takes name of the dataGather, and returns the corresponding dataGather object, and an error if there is any. -func (c *dataGathers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.DataGather, err error) { - result = &v1alpha1.DataGather{} - err = c.client.Get(). - Resource("datagathers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DataGathers that match those selectors. -func (c *dataGathers) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.DataGatherList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.DataGatherList{} - err = c.client.Get(). - Resource("datagathers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested dataGathers. -func (c *dataGathers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("datagathers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a dataGather and creates it. Returns the server's representation of the dataGather, and an error, if there is any. -func (c *dataGathers) Create(ctx context.Context, dataGather *v1alpha1.DataGather, opts v1.CreateOptions) (result *v1alpha1.DataGather, err error) { - result = &v1alpha1.DataGather{} - err = c.client.Post(). - Resource("datagathers"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(dataGather). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a dataGather and updates it. Returns the server's representation of the dataGather, and an error, if there is any. -func (c *dataGathers) Update(ctx context.Context, dataGather *v1alpha1.DataGather, opts v1.UpdateOptions) (result *v1alpha1.DataGather, err error) { - result = &v1alpha1.DataGather{} - err = c.client.Put(). - Resource("datagathers"). - Name(dataGather.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(dataGather). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *dataGathers) UpdateStatus(ctx context.Context, dataGather *v1alpha1.DataGather, opts v1.UpdateOptions) (result *v1alpha1.DataGather, err error) { - result = &v1alpha1.DataGather{} - err = c.client.Put(). - Resource("datagathers"). - Name(dataGather.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(dataGather). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the dataGather and deletes it. Returns an error if one occurs. -func (c *dataGathers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("datagathers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *dataGathers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("datagathers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched dataGather. -func (c *dataGathers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.DataGather, err error) { - result = &v1alpha1.DataGather{} - err = c.client.Patch(pt). - Resource("datagathers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied dataGather. -func (c *dataGathers) Apply(ctx context.Context, dataGather *insightsv1alpha1.DataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.DataGather, err error) { - if dataGather == nil { - return nil, fmt.Errorf("dataGather provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(dataGather) - if err != nil { - return nil, err - } - name := dataGather.Name - if name == nil { - return nil, fmt.Errorf("dataGather.Name must be provided to Apply") - } - result = &v1alpha1.DataGather{} - err = c.client.Patch(types.ApplyPatchType). - Resource("datagathers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *dataGathers) ApplyStatus(ctx context.Context, dataGather *insightsv1alpha1.DataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.DataGather, err error) { - if dataGather == nil { - return nil, fmt.Errorf("dataGather provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(dataGather) - if err != nil { - return nil, err - } - - name := dataGather.Name - if name == nil { - return nil, fmt.Errorf("dataGather.Name must be provided to Apply") - } - - result = &v1alpha1.DataGather{} - err = c.client.Patch(types.ApplyPatchType). - Resource("datagathers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/doc.go b/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/doc.go deleted file mode 100644 index 93a7ca4e0..000000000 --- a/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1alpha1 diff --git a/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/fake/doc.go b/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/fake/doc.go deleted file mode 100644 index 2b5ba4c8e..000000000 --- a/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/fake/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/fake/fake_datagather.go b/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/fake/fake_datagather.go deleted file mode 100644 index 2be351028..000000000 --- a/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/fake/fake_datagather.go +++ /dev/null @@ -1,163 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "github.com/openshift/api/insights/v1alpha1" - insightsv1alpha1 "github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeDataGathers implements DataGatherInterface -type FakeDataGathers struct { - Fake *FakeInsightsV1alpha1 -} - -var datagathersResource = schema.GroupVersionResource{Group: "insights.openshift.io", Version: "v1alpha1", Resource: "datagathers"} - -var datagathersKind = schema.GroupVersionKind{Group: "insights.openshift.io", Version: "v1alpha1", Kind: "DataGather"} - -// Get takes name of the dataGather, and returns the corresponding dataGather object, and an error if there is any. -func (c *FakeDataGathers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.DataGather, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(datagathersResource, name), &v1alpha1.DataGather{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.DataGather), err -} - -// List takes label and field selectors, and returns the list of DataGathers that match those selectors. -func (c *FakeDataGathers) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.DataGatherList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(datagathersResource, datagathersKind, opts), &v1alpha1.DataGatherList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.DataGatherList{ListMeta: obj.(*v1alpha1.DataGatherList).ListMeta} - for _, item := range obj.(*v1alpha1.DataGatherList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested dataGathers. -func (c *FakeDataGathers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(datagathersResource, opts)) -} - -// Create takes the representation of a dataGather and creates it. Returns the server's representation of the dataGather, and an error, if there is any. -func (c *FakeDataGathers) Create(ctx context.Context, dataGather *v1alpha1.DataGather, opts v1.CreateOptions) (result *v1alpha1.DataGather, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(datagathersResource, dataGather), &v1alpha1.DataGather{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.DataGather), err -} - -// Update takes the representation of a dataGather and updates it. Returns the server's representation of the dataGather, and an error, if there is any. -func (c *FakeDataGathers) Update(ctx context.Context, dataGather *v1alpha1.DataGather, opts v1.UpdateOptions) (result *v1alpha1.DataGather, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(datagathersResource, dataGather), &v1alpha1.DataGather{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.DataGather), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDataGathers) UpdateStatus(ctx context.Context, dataGather *v1alpha1.DataGather, opts v1.UpdateOptions) (*v1alpha1.DataGather, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(datagathersResource, "status", dataGather), &v1alpha1.DataGather{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.DataGather), err -} - -// Delete takes name of the dataGather and deletes it. Returns an error if one occurs. -func (c *FakeDataGathers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(datagathersResource, name, opts), &v1alpha1.DataGather{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeDataGathers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(datagathersResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.DataGatherList{}) - return err -} - -// Patch applies the patch and returns the patched dataGather. 
-func (c *FakeDataGathers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.DataGather, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(datagathersResource, name, pt, data, subresources...), &v1alpha1.DataGather{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.DataGather), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied dataGather. -func (c *FakeDataGathers) Apply(ctx context.Context, dataGather *insightsv1alpha1.DataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.DataGather, err error) { - if dataGather == nil { - return nil, fmt.Errorf("dataGather provided to Apply must not be nil") - } - data, err := json.Marshal(dataGather) - if err != nil { - return nil, err - } - name := dataGather.Name - if name == nil { - return nil, fmt.Errorf("dataGather.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(datagathersResource, *name, types.ApplyPatchType, data), &v1alpha1.DataGather{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.DataGather), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeDataGathers) ApplyStatus(ctx context.Context, dataGather *insightsv1alpha1.DataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.DataGather, err error) { - if dataGather == nil { - return nil, fmt.Errorf("dataGather provided to Apply must not be nil") - } - data, err := json.Marshal(dataGather) - if err != nil { - return nil, err - } - name := dataGather.Name - if name == nil { - return nil, fmt.Errorf("dataGather.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(datagathersResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.DataGather{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.DataGather), err -} diff --git a/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/fake/fake_insights_client.go b/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/fake/fake_insights_client.go deleted file mode 100644 index 8c272ce1f..000000000 --- a/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/fake/fake_insights_client.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeInsightsV1alpha1 struct { - *testing.Fake -} - -func (c *FakeInsightsV1alpha1) DataGathers() v1alpha1.DataGatherInterface { - return &FakeDataGathers{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *FakeInsightsV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/generated_expansion.go b/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/generated_expansion.go deleted file mode 100644 index aa41eb20c..000000000 --- a/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,5 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -type DataGatherExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/insights_client.go b/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/insights_client.go deleted file mode 100644 index 020bb5a39..000000000 --- a/vendor/github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/insights_client.go +++ /dev/null @@ -1,91 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "net/http" - - v1alpha1 "github.com/openshift/api/insights/v1alpha1" - "github.com/openshift/client-go/insights/clientset/versioned/scheme" - rest "k8s.io/client-go/rest" -) - -type InsightsV1alpha1Interface interface { - RESTClient() rest.Interface - DataGathersGetter -} - -// InsightsV1alpha1Client is used to interact with features provided by the insights.openshift.io group. -type InsightsV1alpha1Client struct { - restClient rest.Interface -} - -func (c *InsightsV1alpha1Client) DataGathers() DataGatherInterface { - return newDataGathers(c) -} - -// NewForConfig creates a new InsightsV1alpha1Client for the given config. -// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), -// where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*InsightsV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - httpClient, err := rest.HTTPClientFor(&config) - if err != nil { - return nil, err - } - return NewForConfigAndClient(&config, httpClient) -} - -// NewForConfigAndClient creates a new InsightsV1alpha1Client for the given config and http client. -// Note the http client provided takes precedence over the configured transport values. -func NewForConfigAndClient(c *rest.Config, h *http.Client) (*InsightsV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientForConfigAndClient(&config, h) - if err != nil { - return nil, err - } - return &InsightsV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new InsightsV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *InsightsV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new InsightsV1alpha1Client for the given RESTClient. 
-func New(c rest.Interface) *InsightsV1alpha1Client { - return &InsightsV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *InsightsV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/modules.txt b/vendor/modules.txt index f9569bdb8..e40307eca 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -200,7 +200,6 @@ github.com/openshift/api/machine/v1beta1 github.com/openshift/api/monitoring github.com/openshift/api/monitoring/v1alpha1 github.com/openshift/api/network -github.com/openshift/api/insights/v1alpha1 github.com/openshift/api/network/v1 github.com/openshift/api/networkoperator github.com/openshift/api/networkoperator/v1 @@ -281,13 +280,6 @@ github.com/openshift/client-go/imageregistry/clientset/versioned/fake github.com/openshift/client-go/imageregistry/clientset/versioned/scheme github.com/openshift/client-go/imageregistry/clientset/versioned/typed/imageregistry/v1 github.com/openshift/client-go/imageregistry/clientset/versioned/typed/imageregistry/v1/fake -github.com/openshift/client-go/insights/applyconfigurations/insights/v1alpha1 -github.com/openshift/client-go/insights/applyconfigurations/internal -github.com/openshift/client-go/insights/clientset/versioned -github.com/openshift/client-go/insights/clientset/versioned/fake -github.com/openshift/client-go/insights/clientset/versioned/scheme -github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1 -github.com/openshift/client-go/insights/clientset/versioned/typed/insights/v1alpha1/fake github.com/openshift/client-go/network/applyconfigurations/internal github.com/openshift/client-go/network/applyconfigurations/network/v1 github.com/openshift/client-go/network/clientset/versioned
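
For reference, the generated clientset deleted above was the supported way to reach DataGather resources from Go before this change. The following is a minimal sketch of that pre-removal usage, assuming a rest.Config is already in hand; the package name, function name, and variable names are illustrative and do not come from the operator code.

package example // hypothetical package name, for illustration only

import (
	"context"
	"fmt"

	insightsv1alpha1 "github.com/openshift/api/insights/v1alpha1"
	insightsclient "github.com/openshift/client-go/insights/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

// createOnDemandGather builds the (now removed) typed clientset and creates a
// cluster-scoped DataGather, using NewForConfig and the DataGathers() accessor
// defined in the deleted clientset.go / insights_client.go files above.
func createOnDemandGather(ctx context.Context, cfg *rest.Config) error {
	cs, err := insightsclient.NewForConfig(cfg)
	if err != nil {
		return err
	}
	dg := &insightsv1alpha1.DataGather{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "on-demand-gather-"},
	}
	created, err := cs.InsightsV1alpha1().DataGathers().Create(ctx, dg, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	fmt.Println("created DataGather", created.Name)
	return nil
}

The fake package removed above (NewSimpleClientset, FakeInsightsV1alpha1, FakeDataGathers) backed unit tests for the same code path. A hedged sketch of such a test, with a hypothetical test name and seeded object:

package example_test

import (
	"context"
	"testing"

	insightsv1alpha1 "github.com/openshift/api/insights/v1alpha1"
	insightsfake "github.com/openshift/client-go/insights/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestListSeededDataGathers(t *testing.T) {
	// Seed the object tracker with one DataGather; the fake serves List through
	// testing.ObjectReaction, as shown in the deleted clientset_generated.go.
	seed := &insightsv1alpha1.DataGather{ObjectMeta: metav1.ObjectMeta{Name: "periodic-gather"}}
	cs := insightsfake.NewSimpleClientset(seed)

	list, err := cs.InsightsV1alpha1().DataGathers().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if len(list.Items) != 1 {
		t.Fatalf("expected 1 DataGather, got %d", len(list.Items))
	}
}

With the github.com/openshift/api/insights/v1alpha1 and github.com/openshift/client-go/insights/... entries dropped from vendor/modules.txt above, neither sketch would compile inside this repository any longer, which is the point of the removal.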