From a9eccce1502c104268747db12c450372756ffedd Mon Sep 17 00:00:00 2001 From: Antonin Stefanutti Date: Sat, 16 Mar 2019 16:03:57 +0100 Subject: [PATCH] Upgrade Operator SDK to version 0.6.0 --- Gopkg.lock | 6 +- Gopkg.toml | 2 +- .../operator-sdk/cmd/generate/openapi.go | 68 +--- .../commands/operator-sdk/cmd/migrate.go | 1 + .../commands/operator-sdk/cmd/new.go | 43 ++- .../operator-sdk/cmd/olm-catalog/gen-csv.go | 82 ++++- .../commands/operator-sdk/cmd/scorecard.go | 7 +- .../operator-sdk/cmd/scorecard/basic_tests.go | 206 ++--------- .../operator-sdk/cmd/scorecard/olm_tests.go | 292 ++++++++++------ .../cmd/scorecard/resource_handler.go | 124 +++++-- .../operator-sdk/cmd/scorecard/scorecard.go | 322 +++++++++-------- .../cmd/scorecard/test_definitions.go | 324 ++++++++++++++++++ .../commands/operator-sdk/cmd/test/cluster.go | 3 +- .../commands/operator-sdk/cmd/test/local.go | 28 +- .../image/ansible/scaffold-ansible-image.go | 1 + .../images/scorecard-proxy/cmd/proxy/main.go | 2 +- .../internal/util/fileutil/file_util.go | 4 - .../operator-sdk/internal/util/k8sutil/crd.go | 48 +++ .../internal/util/k8sutil/object.go | 40 +++ .../internal/util/yamlutil/manifest.go | 22 +- .../pkg/ansible/controller/controller.go | 4 +- .../pkg/ansible/controller/reconcile.go | 66 +++- .../operator-sdk/pkg/ansible/flags/flag.go | 16 + .../pkg/ansible/operator/operator.go | 34 ++ .../operator-sdk/pkg/ansible/proxy/proxy.go | 19 +- .../operator-sdk/pkg/ansible/run.go | 1 + .../pkg/ansible/runner/eventapi/types.go | 12 + .../runner/internal/inputdir/inputdir.go | 2 +- .../operator-sdk/pkg/ansible/runner/runner.go | 14 + .../operator-sdk/pkg/helm/run.go | 8 +- .../operator-sdk/pkg/k8sutil/constants.go | 6 +- .../operator-sdk/pkg/k8sutil/k8sutil.go | 33 ++ .../operator-sdk/pkg/leader/leader.go | 22 +- .../operator-sdk/pkg/log/zap/flags.go | 27 +- .../operator-sdk/pkg/log/zap/logger.go | 9 +- .../operator-sdk/pkg/metrics/metrics.go | 119 +++++-- .../operator-sdk/pkg/ready/ready.go | 2 +- .../operator-sdk/pkg/scaffold/addtoscheme.go | 4 +- .../pkg/scaffold/ansible/ao_logs.go | 50 +++ .../pkg/scaffold/ansible/deploy_operator.go | 20 +- .../pkg/scaffold/ansible/dockerfilehybrid.go | 1 + .../pkg/scaffold/ansible/gopkgtoml.go | 2 +- .../pkg/scaffold/ansible/k8s_status.go | 5 +- .../ansible/molecule_default_molecule.go | 1 + .../ansible/molecule_test_cluster_playbook.go | 2 +- .../ansible/molecule_test_local_molecule.go | 1 + .../ansible/molecule_test_local_playbook.go | 2 +- .../pkg/scaffold/build_dockerfile.go | 2 +- .../pkg/scaffold/controller_kind.go | 10 +- .../operator-sdk/pkg/scaffold/crd.go | 17 +- .../operator-sdk/pkg/scaffold/customrender.go | 8 + .../operator-sdk/pkg/scaffold/doc.go | 2 +- .../operator-sdk/pkg/scaffold/gopkgtoml.go | 2 +- .../operator-sdk/pkg/scaffold/helm/chart.go | 235 ++++++++++++- .../pkg/scaffold/helm/dockerfilehybrid.go | 2 +- .../pkg/scaffold/helm/gopkgtoml.go | 12 +- .../pkg/scaffold/helm/operator.go | 3 - .../operator-sdk/pkg/scaffold/helm/watches.go | 6 +- .../pkg/scaffold/olm-catalog/concat_crd.go | 81 ----- .../pkg/scaffold/olm-catalog/config.go | 2 +- .../pkg/scaffold/olm-catalog/csv.go | 88 +++-- .../pkg/scaffold/olm-catalog/csv_updaters.go | 57 +-- .../operator-sdk/pkg/scaffold/register.go | 2 +- .../operator-sdk/pkg/scaffold/resource.go | 6 + .../operator-sdk/pkg/scaffold/role.go | 8 + .../operator-sdk/pkg/scaffold/scaffold.go | 25 +- .../operator-sdk/pkg/scaffold/types.go | 2 +- .../operator-sdk/pkg/test/context.go | 19 +- .../pkg/test/e2eutil/wait_util.go | 30 ++ 
.../operator-sdk/pkg/test/main_entry.go | 17 +- .../operator-sdk/version/version.go | 2 +- 71 files changed, 1903 insertions(+), 842 deletions(-) create mode 100644 vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/test_definitions.go create mode 100644 vendor/github.com/operator-framework/operator-sdk/internal/util/k8sutil/crd.go create mode 100644 vendor/github.com/operator-framework/operator-sdk/internal/util/k8sutil/object.go create mode 100644 vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/ao_logs.go delete mode 100644 vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/olm-catalog/concat_crd.go diff --git a/Gopkg.lock b/Gopkg.lock index 72f61a1c69..e8d279fd52 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -469,7 +469,7 @@ version = "v3.9.0" [[projects]] - digest = "1:df8e741cd0f86087367f3bcfeb1cf237e96fada71194b6d4cee9412d221ec763" + digest = "1:0bd15e3e708ab1ccba566b024fcf9982b51a3ab8498d53c8d416dd8f3f9b8211" name = "github.com/operator-framework/operator-sdk" packages = [ "pkg/k8sutil", @@ -478,8 +478,8 @@ "version", ] pruneopts = "NT" - revision = "6754b70169f1b62355516947270e33b9f73d8159" - version = "v0.5.0" + revision = "61e0c23e9d2e217f8d95ac104a8f2545c102b5c3" + version = "v0.6.0" [[projects]] digest = "1:93b1d84c5fa6d1ea52f4114c37714cddd84d5b78f151b62bb101128dd51399bf" diff --git a/Gopkg.toml b/Gopkg.toml index 917c4dd319..41a0e0f98d 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -41,7 +41,7 @@ required = [ name = "github.com/operator-framework/operator-sdk" # The version rule is used for a specific release and the master branch for in between releases. # branch = "master" #osdk_branch_annotation - version = "=v0.5.0" #osdk_version_annotation + version = "=v0.6.0" #osdk_version_annotation [[constraint]] name = "github.com/coreos/prometheus-operator" diff --git a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/generate/openapi.go b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/generate/openapi.go index 0b7fdcacec..98bc00e80e 100644 --- a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/generate/openapi.go +++ b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/generate/openapi.go @@ -23,15 +23,13 @@ import ( "strings" genutil "github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/generate/internal" + "github.com/operator-framework/operator-sdk/internal/util/k8sutil" "github.com/operator-framework/operator-sdk/internal/util/projutil" "github.com/operator-framework/operator-sdk/pkg/scaffold" "github.com/operator-framework/operator-sdk/pkg/scaffold/input" - "github.com/ghodss/yaml" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var headerFile string @@ -111,26 +109,29 @@ func OpenAPIGen() error { AbsProjectPath: absProjectPath, ProjectName: filepath.Base(absProjectPath), } - crdMap, err := getCRDGVKMap() + crds, err := k8sutil.GetCRDs(scaffold.CRDsDir) if err != nil { return err } - for g, vs := range gvMap { - for _, v := range vs { - gvks := crdMap[filepath.Join(g, v)] - for _, gvk := range gvks { - r, err := scaffold.NewResource(filepath.Join(gvk.Group, gvk.Version), gvk.Kind) - if err != nil { - return err - } - err = s.Execute(cfg, - &scaffold.CRD{Resource: r, IsOperatorGo: projutil.IsOperatorGo()}, - ) - if err != nil { - return err - } + for _, 
crd := range crds { + g, v, k := crd.Spec.Group, crd.Spec.Version, crd.Spec.Names.Kind + if v == "" { + if len(crd.Spec.Versions) != 0 { + v = crd.Spec.Versions[0].Name + } else { + return fmt.Errorf("crd of group %s kind %s has no version", g, k) } } + r, err := scaffold.NewResource(g+"/"+v, k) + if err != nil { + return err + } + err = s.Execute(cfg, + &scaffold.CRD{Resource: r, IsOperatorGo: projutil.IsOperatorGo()}, + ) + if err != nil { + return err + } } log.Info("Code-generation complete.") @@ -179,34 +180,3 @@ func openAPIGen(binDir string, fqApis []string) (err error) { } return nil } - -func getCRDGVKMap() (map[string][]metav1.GroupVersionKind, error) { - crdInfos, err := ioutil.ReadDir(scaffold.CRDsDir) - if err != nil { - return nil, err - } - crdMap := make(map[string][]metav1.GroupVersionKind) - for _, info := range crdInfos { - if filepath.Ext(info.Name()) == ".yaml" { - path := filepath.Join(scaffold.CRDsDir, info.Name()) - b, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - crd := &apiextv1beta1.CustomResourceDefinition{} - if err := yaml.Unmarshal(b, crd); err != nil { - return nil, err - } - if crd.Kind != "CustomResourceDefinition" { - continue - } - gv := filepath.Join(strings.Split(info.Name(), "_")[:2]...) - crdMap[gv] = append(crdMap[gv], metav1.GroupVersionKind{ - Group: crd.Spec.Group, - Version: crd.Spec.Version, - Kind: crd.Spec.Names.Kind, - }) - } - } - return crdMap, nil -} diff --git a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/migrate.go b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/migrate.go index 3e2486a2e4..28f50ad212 100644 --- a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/migrate.go +++ b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/migrate.go @@ -91,6 +91,7 @@ func migrateAnsible() error { &ansible.Entrypoint{}, &ansible.UserSetup{}, &ansible.K8sStatus{}, + &ansible.AoLogs{}, ) if err != nil { return fmt.Errorf("migrate ansible scaffold failed: (%v)", err) diff --git a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/new.go b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/new.go index 078e12f5d3..05988ea412 100644 --- a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/new.go +++ b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/new.go @@ -56,6 +56,10 @@ generates a skeletal app-operator application in $GOPATH/src/github.com/example. newCmd.Flags().BoolVar(&generatePlaybook, "generate-playbook", false, "Generate a playbook skeleton. 
(Only used for --type ansible)") newCmd.Flags().BoolVar(&isClusterScoped, "cluster-scoped", false, "Generate cluster-scoped resources instead of namespace-scoped") + newCmd.Flags().StringVar(&helmChartRef, "helm-chart", "", "Initialize helm operator with existing helm chart (, /, or local path)") + newCmd.Flags().StringVar(&helmChartVersion, "helm-chart-version", "", "Specific version of the helm chart (default is latest version)") + newCmd.Flags().StringVar(&helmChartRepo, "helm-chart-repo", "", "Chart repository URL for the requested helm chart") + return newCmd } @@ -67,6 +71,10 @@ var ( skipGit bool generatePlaybook bool isClusterScoped bool + + helmChartRef string + helmChartVersion string + helmChartRepo string ) const ( @@ -256,14 +264,17 @@ func doHelmScaffold() error { ProjectName: projectName, } - resource, err := scaffold.NewResource(apiVersion, kind) - if err != nil { - return err + createOpts := helm.CreateChartOptions{ + ResourceAPIVersion: apiVersion, + ResourceKind: kind, + Chart: helmChartRef, + Version: helmChartVersion, + Repo: helmChartRepo, } - chart, err := helm.CreateChartForResource(resource, cfg.AbsProjectPath) + resource, chart, err := helm.CreateChart(cfg.AbsProjectPath, createOpts) if err != nil { - log.Fatalf("Failed to create initial helm chart for resource (%v, %v): (%v)", resource.APIVersion, resource.Kind, err) + return fmt.Errorf("failed to create helm chart: %s", err) } valuesPath := filepath.Join("", helm.HelmChartsDir, chart.GetMetadata().GetName(), "values.yaml") @@ -272,7 +283,10 @@ func doHelmScaffold() error { s := &scaffold.Scaffold{} err = s.Execute(cfg, &helm.Dockerfile{}, - &helm.WatchesYAML{Resource: resource}, + &helm.WatchesYAML{ + Resource: resource, + ChartName: chart.GetMetadata().GetName(), + }, &scaffold.ServiceAccount{}, &scaffold.Role{IsClusterScoped: isClusterScoped}, &scaffold.RoleBinding{IsClusterScoped: isClusterScoped}, @@ -300,11 +314,26 @@ func verifyFlags() error { if operatorType != projutil.OperatorTypeAnsible && generatePlaybook { return fmt.Errorf("value of --generate-playbook can only be used with --type `ansible`") } + + if len(helmChartRef) != 0 { + if operatorType != projutil.OperatorTypeHelm { + return fmt.Errorf("value of --helm-chart can only be used with --type=helm") + } + } else if len(helmChartRepo) != 0 { + return fmt.Errorf("value of --helm-chart-repo can only be used with --type=helm and --helm-chart") + } else if len(helmChartVersion) != 0 { + return fmt.Errorf("value of --helm-chart-version can only be used with --type=helm and --helm-chart") + } + if operatorType == projutil.OperatorTypeGo && (len(apiVersion) != 0 || len(kind) != 0) { return fmt.Errorf("operators of type Go do not use --api-version or --kind") } - if operatorType != projutil.OperatorTypeGo { + // --api-version and --kind are required with --type=ansible and --type=helm, with one exception. + // + // If --type=helm and --helm-chart is set, --api-version and --kind are optional. If left unset, + // sane defaults are used when the specified helm chart is created. 
+ if operatorType == projutil.OperatorTypeAnsible || operatorType == projutil.OperatorTypeHelm && len(helmChartRef) == 0 { if len(apiVersion) == 0 { return fmt.Errorf("value of --api-version must not have empty value") } diff --git a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/olm-catalog/gen-csv.go b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/olm-catalog/gen-csv.go index baed97c9ad..3a972344b7 100644 --- a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/olm-catalog/gen-csv.go +++ b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/olm-catalog/gen-csv.go @@ -16,8 +16,11 @@ package catalog import ( "fmt" + "io/ioutil" "path/filepath" + "strings" + "github.com/operator-framework/operator-sdk/internal/util/fileutil" "github.com/operator-framework/operator-sdk/internal/util/projutil" "github.com/operator-framework/operator-sdk/pkg/scaffold" "github.com/operator-framework/operator-sdk/pkg/scaffold/input" @@ -30,7 +33,9 @@ import ( var ( csvVersion string + fromVersion string csvConfigPath string + updateCRDs bool ) func NewGenCSVCmd() *cobra.Command { @@ -40,7 +45,9 @@ func NewGenCSVCmd() *cobra.Command { Long: `The gen-csv command generates a Cluster Service Version (CSV) YAML manifest for the operator. This file is used to publish the operator to the OLM Catalog. -A CSV semantic version is supplied via the --csv-version flag. +A CSV semantic version is supplied via the --csv-version flag. If your operator +has already generated a CSV manifest you want to use as a base, supply its +version to --from-version. Otherwise the SDK will scaffold a new CSV manifest. Configure CSV generation by writing a config file 'deploy/olm-catalog/csv-config.yaml`, RunE: genCSVFunc, @@ -48,7 +55,9 @@ Configure CSV generation by writing a config file 'deploy/olm-catalog/csv-config genCSVCmd.Flags().StringVar(&csvVersion, "csv-version", "", "Semantic version of the CSV") genCSVCmd.MarkFlagRequired("csv-version") + genCSVCmd.Flags().StringVar(&fromVersion, "from-version", "", "Semantic version of an existing CSV to use as a base") genCSVCmd.Flags().StringVar(&csvConfigPath, "csv-config", "", "Path to CSV config file. Defaults to deploy/olm-catalog/csv-config.yaml") + genCSVCmd.Flags().BoolVar(&updateCRDs, "update-crds", false, "Update CRD manifests in deploy/{operator-name}/{csv-version} the using latest API's") return genCSVCmd } @@ -67,32 +76,83 @@ func genCSVFunc(cmd *cobra.Command, args []string) error { AbsProjectPath: absProjectPath, ProjectName: filepath.Base(absProjectPath), } - if projutil.GetOperatorType() == projutil.OperatorTypeGo { + if projutil.IsOperatorGo() { cfg.Repo = projutil.CheckAndGetProjectGoPkg() } log.Infof("Generating CSV manifest version %s", csvVersion) s := &scaffold.Scaffold{} - err := s.Execute(cfg, - &catalog.CSV{CSVVersion: csvVersion, ConfigFilePath: csvConfigPath}, - &catalog.ConcatCRD{ConfigFilePath: csvConfigPath}, - ) - if err != nil { + csv := &catalog.CSV{ + CSVVersion: csvVersion, + FromVersion: fromVersion, + ConfigFilePath: csvConfigPath, + } + if err := s.Execute(cfg, csv); err != nil { return fmt.Errorf("catalog scaffold failed: (%v)", err) } + + // Write CRD's to the new or updated CSV package dir. 
+ if updateCRDs { + input, err := csv.GetInput() + if err != nil { + return err + } + cfg, err := catalog.GetCSVConfig(csvConfigPath) + if err != nil { + return err + } + err = writeCRDsToDir(cfg.CRDCRPaths, filepath.Dir(input.Path)) + if err != nil { + return err + } + } + return nil } func verifyGenCSVFlags() error { - v, err := semver.NewVersion(csvVersion) + if err := verifyCSVVersion(csvVersion); err != nil { + return err + } + if fromVersion != "" { + if err := verifyCSVVersion(fromVersion); err != nil { + return err + } + } + if fromVersion != "" && csvVersion == fromVersion { + return fmt.Errorf("from-version (%s) cannot equal csv-version; set only csv-version instead", fromVersion) + } + return nil +} + +func verifyCSVVersion(version string) error { + v, err := semver.NewVersion(version) if err != nil { - return fmt.Errorf("%s is not a valid semantic version: (%v)", csvVersion, err) + return fmt.Errorf("%s is not a valid semantic version: (%v)", version, err) } // Ensures numerical values composing csvVersion don't contain leading 0's, // ex. 01.01.01 - if v.String() != csvVersion { - return fmt.Errorf("provided CSV version %s contains bad values (parses to %s)", csvVersion, v) + if v.String() != version { + return fmt.Errorf("provided CSV version %s contains bad values (parses to %s)", version, v) + } + return nil +} + +func writeCRDsToDir(crdPaths []string, toDir string) error { + for _, p := range crdPaths { + if !strings.HasSuffix(p, "crd.yaml") { + continue + } + b, err := ioutil.ReadFile(p) + if err != nil { + return err + } + path := filepath.Join(toDir, filepath.Base(p)) + err = ioutil.WriteFile(path, b, fileutil.DefaultFileMode) + if err != nil { + return err + } } return nil } diff --git a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard.go b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard.go index 6449012338..e514eb34e9 100644 --- a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard.go +++ b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard.go @@ -18,14 +18,13 @@ import ( "fmt" "strings" + "github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard" "github.com/operator-framework/operator-sdk/pkg/scaffold" + "github.com/operator-framework/operator-sdk/version" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" - - "github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard" - "github.com/operator-framework/operator-sdk/version" ) // scorecardConfig stores all scorecard config passed as flags @@ -33,6 +32,7 @@ type scorecardConfig struct { namespace string kubeconfigPath string initTimeout int + olmDeployed bool csvPath string basicTests bool olmTests bool @@ -61,6 +61,7 @@ func NewScorecardCmd() *cobra.Command { scorecardCmd.Flags().StringVar(&scConf.namespace, scorecard.NamespaceOpt, "", "Namespace of custom resource created in cluster") scorecardCmd.Flags().StringVar(&scConf.kubeconfigPath, scorecard.KubeconfigOpt, "", "Path to kubeconfig of custom resource created in cluster") scorecardCmd.Flags().IntVar(&scConf.initTimeout, scorecard.InitTimeoutOpt, 10, "Timeout for status block on CR to be created in seconds") + scorecardCmd.Flags().BoolVar(&scConf.olmDeployed, scorecard.OlmDeployedOpt, false, "The OLM has deployed the operator. 
Use only the CSV for test data") scorecardCmd.Flags().StringVar(&scConf.csvPath, scorecard.CSVPathOpt, "", "Path to CSV being tested") scorecardCmd.Flags().BoolVar(&scConf.basicTests, scorecard.BasicTestsOpt, true, "Enable basic operator checks") scorecardCmd.Flags().BoolVar(&scConf.olmTests, scorecard.OLMTestsOpt, true, "Enable OLM integration checks") diff --git a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/basic_tests.go b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/basic_tests.go index 198b9435d9..53339cc73e 100644 --- a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/basic_tests.go +++ b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/basic_tests.go @@ -15,195 +15,56 @@ package scorecard import ( - "bytes" "context" "encoding/json" "fmt" - "math/rand" - "reflect" "strings" - "time" - "github.com/operator-framework/operator-sdk/internal/util/fileutil" - - log "github.com/sirupsen/logrus" - "github.com/spf13/viper" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client" ) -// checkSpecAndStat checks that the spec and status blocks exist. If noStore is set to true, this function -// will not store the result of the test in scTests and will instead just wait until the spec and -// status blocks exist or return an error after the timeout. -func checkSpecAndStat(runtimeClient client.Client, obj *unstructured.Unstructured, noStore bool) error { - testSpec := scorecardTest{testType: basicOperator, name: "Spec Block Exists", maximumPoints: 1} - testStat := scorecardTest{testType: basicOperator, name: "Status Block Exist", maximumPoints: 1} - err := wait.Poll(time.Second*1, time.Second*time.Duration(viper.GetInt64(InitTimeoutOpt)), func() (bool, error) { - err := runtimeClient.Get(context.TODO(), types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()}, obj) - if err != nil { - return false, fmt.Errorf("error getting custom resource: %v", err) - } - var specPass, statusPass bool - if obj.Object["spec"] != nil { - testSpec.earnedPoints = 1 - specPass = true - } - - if obj.Object["status"] != nil { - testStat.earnedPoints = 1 - statusPass = true - } - return statusPass && specPass, nil - }) - if !noStore { - scTests = append(scTests, testSpec, testStat) - } - if err != nil && err != wait.ErrWaitTimeout { - return err +// Run - implements Test interface +func (t *CheckSpecTest) Run(ctx context.Context) *TestResult { + res := &TestResult{Test: t, MaximumPoints: 1} + err := t.Client.Get(ctx, types.NamespacedName{Namespace: t.CR.GetNamespace(), Name: t.CR.GetName()}, t.CR) + if err != nil { + res.Errors = append(res.Errors, fmt.Errorf("error getting custom resource: %v", err)) + return res } - if testSpec.earnedPoints != 1 { - scSuggestions = append(scSuggestions, "Add a 'spec' field to your Custom Resource") + if t.CR.Object["spec"] != nil { + res.EarnedPoints++ } - if testStat.earnedPoints != 1 { - scSuggestions = append(scSuggestions, "Add a 'status' field to your Custom Resource") + if res.EarnedPoints != 1 { + res.Suggestions = append(res.Suggestions, "Add a 'spec' field to your Custom Resource") } - return nil + return res } -// TODO: user specified tests for operators - -// 
checkStatusUpdate looks at all fields in the spec section of a custom resource and attempts to modify them and -// see if the status changes as a result. This is a bit prone to breakage as this is a black box test and we don't -// know much about how the operators we are testing actually work and may pass an invalid value. In the future, we -// should use user-specified tests -func checkStatusUpdate(runtimeClient client.Client, obj *unstructured.Unstructured) error { - test := scorecardTest{testType: basicOperator, name: "Operator actions are reflected in status", maximumPoints: 1} - err := runtimeClient.Get(context.TODO(), types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()}, obj) +// Run - implements Test interface +func (t *CheckStatusTest) Run(ctx context.Context) *TestResult { + res := &TestResult{Test: t, MaximumPoints: 1} + err := t.Client.Get(ctx, types.NamespacedName{Namespace: t.CR.GetNamespace(), Name: t.CR.GetName()}, t.CR) if err != nil { - return fmt.Errorf("error getting custom resource: %v", err) - } - if obj.Object["status"] == nil || obj.Object["spec"] == nil { - scTests = append(scTests, test) - return nil + res.Errors = append(res.Errors, fmt.Errorf("error getting custom resource: %v", err)) + return res } - statCopy := make(map[string]interface{}) - for k, v := range obj.Object["status"].(map[string]interface{}) { - statCopy[k] = v + if t.CR.Object["status"] != nil { + res.EarnedPoints++ } - specMap := obj.Object["spec"].(map[string]interface{}) - err = modifySpecAndCheck(specMap, obj) - if err != nil { - test.earnedPoints = 0 - scSuggestions = append(scSuggestions, "Make sure that the 'status' block is always updated to reflect changes after the 'spec' block is changed") - scTests = append(scTests, test) - return nil + if res.EarnedPoints != 1 { + res.Suggestions = append(res.Suggestions, "Add a 'status' field to your Custom Resource") } - test.earnedPoints = 1 - scTests = append(scTests, test) - return nil + return res } -// modifySpecAndCheck is a helper function for checkStatusUpdate -func modifySpecAndCheck(specMap map[string]interface{}, obj *unstructured.Unstructured) error { - statCopy := make(map[string]interface{}) - for k, v := range obj.Object["status"].(map[string]interface{}) { - statCopy[k] = v - } - var err error - for k, v := range specMap { - mapType := false - switch t := v.(type) { - case int64: - specMap[k] = specMap[k].(int64) + 1 - case float64: - specMap[k] = specMap[k].(float64) + 1 - case string: - // TODO: try and find out how to make this better - // Since strings may be very operator specific, this test may not work correctly in many cases - specMap[k] = fmt.Sprintf("operator sdk test value %f", rand.Float64()) - case bool: - specMap[k] = !specMap[k].(bool) - case map[string]interface{}: - mapType = true - err = modifySpecAndCheck(specMap[k].(map[string]interface{}), obj) - case []map[string]interface{}: - mapType = true - for _, item := range specMap[k].([]map[string]interface{}) { - err = modifySpecAndCheck(item, obj) - if err != nil { - break - } - } - case []interface{}: // TODO: Decide how this should be handled - default: - fmt.Printf("Unknown type for key (%s) in spec: (%v)\n", k, reflect.TypeOf(t)) - } - if !mapType { - if err := runtimeClient.Update(context.TODO(), obj); err != nil { - return fmt.Errorf("failed to update object: %v", err) - } - err = wait.Poll(time.Second*1, time.Second*15, func() (done bool, err error) { - err = runtimeClient.Get(context.TODO(), types.NamespacedName{Namespace: 
obj.GetNamespace(), Name: obj.GetName()}, obj) - if err != nil { - return false, err - } - return !reflect.DeepEqual(statCopy, obj.Object["status"]), nil - }) - } - if err != nil { - return err - } - //reset stat copy to match - statCopy = make(map[string]interface{}) - for k, v := range obj.Object["status"].(map[string]interface{}) { - statCopy[k] = v - } - } - return nil -} - -// wiritingIntoCRsHasEffect simply looks at the proxy logs and verifies that the operator is sending PUT -// and/or POST requests to the API server, which should mean that it is creating or modifying resources. -func writingIntoCRsHasEffect(obj *unstructured.Unstructured) (string, error) { - test := scorecardTest{testType: basicOperator, name: "Writing into CRs has an effect", maximumPoints: 1} - kubeclient, err := kubernetes.NewForConfig(kubeconfig) - if err != nil { - return "", fmt.Errorf("failed to create kubeclient: %v", err) - } - dep := &appsv1.Deployment{} - err = runtimeClient.Get(context.TODO(), types.NamespacedName{Namespace: obj.GetNamespace(), Name: deploymentName}, dep) - if err != nil { - return "", fmt.Errorf("failed to get newly created operator deployment: %v", err) - } - set := labels.Set(dep.Spec.Selector.MatchLabels) - pods := &v1.PodList{} - err = runtimeClient.List(context.TODO(), &client.ListOptions{LabelSelector: set.AsSelector()}, pods) - if err != nil { - return "", fmt.Errorf("failed to get list of pods in deployment: %v", err) - } - proxyPod = &pods.Items[0] - req := kubeclient.CoreV1().Pods(obj.GetNamespace()).GetLogs(proxyPod.GetName(), &v1.PodLogOptions{Container: "scorecard-proxy"}) - readCloser, err := req.Stream() - if err != nil { - return "", fmt.Errorf("failed to get logs: %v", err) - } - defer func() { - if err := readCloser.Close(); err != nil && !fileutil.IsClosedError(err) { - log.Errorf("Failed to close pod log reader: (%v)", err) - } - }() - buf := new(bytes.Buffer) - _, err = buf.ReadFrom(readCloser) +// Run - implements Test interface +func (t *WritingIntoCRsHasEffectTest) Run(ctx context.Context) *TestResult { + res := &TestResult{Test: t, MaximumPoints: 1} + logs, err := getProxyLogs(t.ProxyPod) if err != nil { - return "", fmt.Errorf("test failed and failed to read pod logs: %v", err) + res.Errors = append(res.Errors, fmt.Errorf("error getting proxy logs: %v", err)) + return res } - logs := buf.String() msgMap := make(map[string]interface{}) for _, msg := range strings.Split(logs, "\n") { if err := json.Unmarshal([]byte(msg), &msgMap); err != nil { @@ -214,13 +75,12 @@ func writingIntoCRsHasEffect(obj *unstructured.Unstructured) (string, error) { continue } if method == "PUT" || method == "POST" { - test.earnedPoints = 1 + res.EarnedPoints = 1 break } } - scTests = append(scTests, test) - if test.earnedPoints != 1 { - scSuggestions = append(scSuggestions, "The operator should write into objects to update state. No PUT or POST requests from you operator were recorded by the scorecard.") + if res.EarnedPoints != 1 { + res.Suggestions = append(res.Suggestions, "The operator should write into objects to update state. 
No PUT or POST requests from the operator were recorded by the scorecard.") } - return buf.String(), nil + return res } diff --git a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/olm_tests.go b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/olm_tests.go index 30fa36de40..252b2655fd 100644 --- a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/olm_tests.go +++ b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/olm_tests.go @@ -16,43 +16,20 @@ package scorecard import ( "context" + "encoding/json" "fmt" - "io/ioutil" - "path/filepath" "strings" - "github.com/operator-framework/operator-sdk/pkg/scaffold" + "github.com/operator-framework/operator-sdk/internal/util/k8sutil" olmapiv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1" log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" ) -func getCRDs(crdsDir string) ([]apiextv1beta1.CustomResourceDefinition, error) { - files, err := ioutil.ReadDir(crdsDir) - if err != nil { - return nil, fmt.Errorf("could not read deploy directory: (%v)", err) - } - crds := []apiextv1beta1.CustomResourceDefinition{} - for _, file := range files { - if strings.HasSuffix(file.Name(), "crd.yaml") { - obj, err := yamlToUnstructured(filepath.Join(scaffold.CRDsDir, file.Name())) - if err != nil { - return nil, err - } - crd, err := unstructuredToCRD(obj) - if err != nil { - return nil, err - } - crds = append(crds, *crd) - } - } - return crds, nil -} - func matchKind(kind1, kind2 string) bool { singularKind1, err := restMapper.ResourceSingularizer(kind1) if err != nil { @@ -68,7 +45,7 @@ func matchKind(kind1, kind2 string) bool { } // matchVersion checks if a CRD contains a specified version in a case insensitive manner -func matchVersion(version string, crd apiextv1beta1.CustomResourceDefinition) bool { +func matchVersion(version string, crd *apiextv1beta1.CustomResourceDefinition) bool { if strings.EqualFold(version, crd.Spec.Version) { return true } @@ -81,163 +58,266 @@ func matchVersion(version string, crd apiextv1beta1.CustomResourceDefinition) bo return false } -// crdsHaveValidation makes sure that all CRDs have a validation block -func crdsHaveValidation(crdsDir string, runtimeClient client.Client, obj *unstructured.Unstructured) error { - test := scorecardTest{testType: olmIntegration, name: "Provided APIs have validation"} - crds, err := getCRDs(crdsDir) +// Run - implements Test interface +func (t *CRDsHaveValidationTest) Run(ctx context.Context) *TestResult { + res := &TestResult{Test: t} + crds, err := k8sutil.GetCRDs(t.CRDsDir) if err != nil { - return fmt.Errorf("failed to get CRDs in %s directory: %v", crdsDir, err) + res.Errors = append(res.Errors, fmt.Errorf("failed to get CRDs in %s directory: %v", t.CRDsDir, err)) + return res } - err = runtimeClient.Get(context.TODO(), types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()}, obj) + err = t.Client.Get(ctx, types.NamespacedName{Namespace: t.CR.GetNamespace(), Name: t.CR.GetName()}, t.CR) if err != nil { - return err + res.Errors = append(res.Errors, err) + return res } // TODO: we need to make this handle multiple CRs 
better/correctly for _, crd := range crds { - test.maximumPoints++ + res.MaximumPoints++ if crd.Spec.Validation == nil { - scSuggestions = append(scSuggestions, fmt.Sprintf("Add CRD validation for %s/%s", crd.Spec.Names.Kind, crd.Spec.Version)) + res.Suggestions = append(res.Suggestions, fmt.Sprintf("Add CRD validation for %s/%s", crd.Spec.Names.Kind, crd.Spec.Version)) continue } // check if the CRD matches the testing CR - gvk := obj.GroupVersionKind() + gvk := t.CR.GroupVersionKind() // Only check the validation block if the CRD and CR have the same Kind and Version if !(matchVersion(gvk.Version, crd) && matchKind(gvk.Kind, crd.Spec.Names.Kind)) { - test.earnedPoints++ + res.EarnedPoints++ continue } failed := false - if obj.Object["spec"] != nil { - spec := obj.Object["spec"].(map[string]interface{}) + if t.CR.Object["spec"] != nil { + spec := t.CR.Object["spec"].(map[string]interface{}) for key := range spec { if _, ok := crd.Spec.Validation.OpenAPIV3Schema.Properties["spec"].Properties[key]; !ok { failed = true - scSuggestions = append(scSuggestions, fmt.Sprintf("Add CRD validation for spec field `%s` in %s/%s", key, gvk.Kind, gvk.Version)) + res.Suggestions = append(res.Suggestions, fmt.Sprintf("Add CRD validation for spec field `%s` in %s/%s", key, gvk.Kind, gvk.Version)) } } } - if obj.Object["status"] != nil { - status := obj.Object["status"].(map[string]interface{}) + if t.CR.Object["status"] != nil { + status := t.CR.Object["status"].(map[string]interface{}) for key := range status { if _, ok := crd.Spec.Validation.OpenAPIV3Schema.Properties["status"].Properties[key]; !ok { failed = true - scSuggestions = append(scSuggestions, fmt.Sprintf("Add CRD validation for status field `%s` in %s/%s", key, gvk.Kind, gvk.Version)) + res.Suggestions = append(res.Suggestions, fmt.Sprintf("Add CRD validation for status field `%s` in %s/%s", key, gvk.Kind, gvk.Version)) } } } if !failed { - test.earnedPoints++ + res.EarnedPoints++ + } + } + return res +} + +// Run - implements Test interface +func (t *CRDsHaveResourcesTest) Run(ctx context.Context) *TestResult { + res := &TestResult{Test: t} + for _, crd := range t.CSV.Spec.CustomResourceDefinitions.Owned { + res.MaximumPoints++ + gvk := t.CR.GroupVersionKind() + if strings.EqualFold(crd.Version, gvk.Version) && matchKind(gvk.Kind, crd.Kind) { + resources, err := getUsedResources(t.ProxyPod) + if err != nil { + log.Warningf("getUsedResource failed: %v", err) + } + allResourcesListed := true + for _, resource := range resources { + foundResource := false + for _, listedResource := range crd.Resources { + if matchKind(resource.Kind, listedResource.Kind) && strings.EqualFold(resource.Version, listedResource.Version) { + foundResource = true + } + } + if foundResource == false { + allResourcesListed = false + } + } + if allResourcesListed { + res.EarnedPoints++ + } + } else { + if len(crd.Resources) > 0 { + res.EarnedPoints++ + } } } - scTests = append(scTests, test) - return nil + if res.EarnedPoints < res.MaximumPoints { + res.Suggestions = append(res.Suggestions, "Add resources to owned CRDs") + } + return res } -// crdsHaveResources checks to make sure that all owned CRDs have resources listed -func crdsHaveResources(csv *olmapiv1alpha1.ClusterServiceVersion) { - test := scorecardTest{testType: olmIntegration, name: "Owned CRDs have resources listed"} - for _, crd := range csv.Spec.CustomResourceDefinitions.Owned { - test.maximumPoints++ - if len(crd.Resources) > 0 { - test.earnedPoints++ +func getUsedResources(proxyPod *v1.Pod) 
([]schema.GroupVersionKind, error) { + logs, err := getProxyLogs(proxyPod) + if err != nil { + return nil, err + } + resources := map[schema.GroupVersionKind]bool{} + for _, line := range strings.Split(logs, "\n") { + logMap := make(map[string]interface{}) + err := json.Unmarshal([]byte(line), &logMap) + if err != nil { + // it is very common to get "unexpected end of JSON input", so we'll leave this at the debug level + log.Debugf("could not unmarshal line: %v", err) + continue + } + /* + There are 6 formats a resource uri can have: + Cluster-Scoped: + Collection: /apis/GROUP/VERSION/KIND + Individual: /apis/GROUP/VERSION/KIND/NAME + Core: /api/v1/KIND + Namespaces: + All Namespaces: /apis/GROUP/VERSION/KIND (same as cluster collection) + Collection in Namespace: /apis/GROUP/VERSION/namespaces/NAMESPACE/KIND + Individual: /apis/GROUP/VERSION/namespaces/NAMESPACE/KIND/NAME + Core: /api/v1/namespaces/NAMESPACE/KIND + + These urls are also often appended with options, which are denoted by the '?' symbol + */ + if msg, ok := logMap["msg"].(string); !ok || msg != "Request Info" { + continue + } + uri, ok := logMap["uri"].(string) + if !ok { + log.Warn("URI type is not string") + continue + } + removedOptions := strings.Split(uri, "?")[0] + splitURI := strings.Split(removedOptions, "/") + // first string is empty string "" + if len(splitURI) < 2 { + log.Warnf("Invalid URI: \"%s\"", uri) + continue + } + splitURI = splitURI[1:] + switch len(splitURI) { + case 3: + if splitURI[0] == "api" { + resources[schema.GroupVersionKind{Version: splitURI[1], Kind: splitURI[2]}] = true + break + } else if splitURI[0] == "apis" { + // this situation happens when the client enumerates the available resources of the server + // Example: "/apis/apps/v1?timeout=32s" + break + } + log.Warnf("Invalid URI: \"%s\"", uri) + case 4: + if splitURI[0] == "apis" { + resources[schema.GroupVersionKind{Group: splitURI[1], Version: splitURI[2], Kind: splitURI[3]}] = true + break + } + log.Warnf("Invalid URI: \"%s\"", uri) + case 5: + if splitURI[0] == "api" { + resources[schema.GroupVersionKind{Version: splitURI[1], Kind: splitURI[4]}] = true + break + } else if splitURI[0] == "apis" { + resources[schema.GroupVersionKind{Group: splitURI[1], Version: splitURI[2], Kind: splitURI[3]}] = true + break + } + log.Warnf("Invalid URI: \"%s\"", uri) + case 6, 7: + if splitURI[0] == "apis" { + resources[schema.GroupVersionKind{Group: splitURI[1], Version: splitURI[2], Kind: splitURI[5]}] = true + break + } + log.Warnf("Invalid URI: \"%s\"", uri) } } - scTests = append(scTests, test) - if test.earnedPoints == 0 { - scSuggestions = append(scSuggestions, "Add resources to owned CRDs") + var resourcesArr []schema.GroupVersionKind + for gvk := range resources { + resourcesArr = append(resourcesArr, gvk) } + return resourcesArr, nil } -// annotationsContainExamples makes sure that the CSVs list at least 1 example for the CR -func annotationsContainExamples(csv *olmapiv1alpha1.ClusterServiceVersion) { - test := scorecardTest{testType: olmIntegration, name: "CRs have at least 1 example", maximumPoints: 1} - if csv.Annotations != nil && csv.Annotations["alm-examples"] != "" { - test.earnedPoints = 1 +// Run - implements Test interface +func (t *AnnotationsContainExamplesTest) Run(ctx context.Context) *TestResult { + res := &TestResult{Test: t, MaximumPoints: 1} + if t.CSV.Annotations != nil && t.CSV.Annotations["alm-examples"] != "" { + res.EarnedPoints = 1 } - scTests = append(scTests, test) - if test.earnedPoints == 0 { - scSuggestions = 
append(scSuggestions, "Add an alm-examples annotation to your CSV to pass the "+test.name+" test") + if res.EarnedPoints == 0 { + res.Suggestions = append(res.Suggestions, fmt.Sprintf("Add an alm-examples annotation to your CSV to pass the %s test", t.GetName())) } + return res } -// statusDescriptors makes sure that all status fields found in the created CR has a matching descriptor in the CSV -func statusDescriptors(csv *olmapiv1alpha1.ClusterServiceVersion, runtimeClient client.Client, obj *unstructured.Unstructured) error { - test := scorecardTest{testType: olmIntegration, name: "Status fields with descriptors"} - err := runtimeClient.Get(context.TODO(), types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()}, obj) +// Run - implements Test interface +func (t *StatusDescriptorsTest) Run(ctx context.Context) *TestResult { + res := &TestResult{Test: t} + err := t.Client.Get(ctx, types.NamespacedName{Namespace: t.CR.GetNamespace(), Name: t.CR.GetName()}, t.CR) if err != nil { - return err + res.Errors = append(res.Errors, err) + return res } - if obj.Object["status"] == nil { - // what should we do if there is no status block? Maybe some kind of N/A type output? - scTests = append(scTests, test) - return nil + if t.CR.Object["status"] == nil { + return res } - statusBlock := obj.Object["status"].(map[string]interface{}) - test.maximumPoints = len(statusBlock) + statusBlock := t.CR.Object["status"].(map[string]interface{}) + res.MaximumPoints = len(statusBlock) var crd *olmapiv1alpha1.CRDDescription - for _, owned := range csv.Spec.CustomResourceDefinitions.Owned { - if owned.Kind == obj.GetKind() { + for _, owned := range t.CSV.Spec.CustomResourceDefinitions.Owned { + if owned.Kind == t.CR.GetKind() { crd = &owned break } } if crd == nil { - scTests = append(scTests, test) - return nil + return res } for key := range statusBlock { for _, statDesc := range crd.StatusDescriptors { if statDesc.Path == key { - test.earnedPoints++ + res.EarnedPoints++ delete(statusBlock, key) break } } } - scTests = append(scTests, test) for key := range statusBlock { - scSuggestions = append(scSuggestions, "Add a status descriptor for "+key) + res.Suggestions = append(res.Suggestions, "Add a status descriptor for "+key) } - return nil + return res } -// specDescriptors makes sure that all spec fields found in the created CR has a matching descriptor in the CSV -func specDescriptors(csv *olmapiv1alpha1.ClusterServiceVersion, runtimeClient client.Client, obj *unstructured.Unstructured) error { - test := scorecardTest{testType: olmIntegration, name: "Spec fields with descriptors"} - err := runtimeClient.Get(context.TODO(), types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()}, obj) +// Run - implements Test interface +func (t *SpecDescriptorsTest) Run(ctx context.Context) *TestResult { + res := &TestResult{Test: t} + err := t.Client.Get(ctx, types.NamespacedName{Namespace: t.CR.GetNamespace(), Name: t.CR.GetName()}, t.CR) if err != nil { - return err + res.Errors = append(res.Errors, err) + return res } - if obj.Object["spec"] == nil { - // what should we do if there is no spec block? Maybe some kind of N/A type output? 
- scTests = append(scTests, test) - return nil + if t.CR.Object["spec"] == nil { + return res } - specBlock := obj.Object["spec"].(map[string]interface{}) - test.maximumPoints = len(specBlock) + specBlock := t.CR.Object["spec"].(map[string]interface{}) + res.MaximumPoints = len(specBlock) var crd *olmapiv1alpha1.CRDDescription - for _, owned := range csv.Spec.CustomResourceDefinitions.Owned { - if owned.Kind == obj.GetKind() { + for _, owned := range t.CSV.Spec.CustomResourceDefinitions.Owned { + if owned.Kind == t.CR.GetKind() { crd = &owned break } } if crd == nil { - scTests = append(scTests, test) - return nil + return res } for key := range specBlock { - for _, specDesc := range crd.SpecDescriptors { - if specDesc.Path == key { - test.earnedPoints++ + for _, statDesc := range crd.SpecDescriptors { + if statDesc.Path == key { + res.EarnedPoints++ delete(specBlock, key) break } } } - scTests = append(scTests, test) for key := range specBlock { - scSuggestions = append(scSuggestions, "Add a spec descriptor for "+key) + res.Suggestions = append(res.Suggestions, "Add a spec descriptor for "+key) } - return nil + return res } diff --git a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/resource_handler.go b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/resource_handler.go index 4b51f59fbc..2262dcf4f0 100644 --- a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/resource_handler.go +++ b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/resource_handler.go @@ -23,23 +23,47 @@ import ( "os" "time" + "github.com/operator-framework/operator-sdk/internal/util/yamlutil" proxyConf "github.com/operator-framework/operator-sdk/pkg/ansible/proxy/kubeconfig" "github.com/operator-framework/operator-sdk/pkg/k8sutil" "github.com/spf13/viper" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/ghodss/yaml" log "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" ) +type cleanupFn func() error + +// waitUntilCRStatusExists waits until the status block of the CR currently being tested exists. 
If the timeout +// is reached, it simply continues and assumes there is no status block +func waitUntilCRStatusExists(cr *unstructured.Unstructured) error { + err := wait.Poll(time.Second*1, time.Second*time.Duration(viper.GetInt(InitTimeoutOpt)), func() (bool, error) { + err := runtimeClient.Get(context.TODO(), types.NamespacedName{Namespace: cr.GetNamespace(), Name: cr.GetName()}, cr) + if err != nil { + return false, fmt.Errorf("error getting custom resource: %v", err) + } + if cr.Object["status"] != nil { + return true, nil + } + return false, nil + }) + if err != nil && err != wait.ErrWaitTimeout { + return err + } + return nil +} + // yamlToUnstructured decodes a yaml file into an unstructured object func yamlToUnstructured(yamlPath string) (*unstructured.Unstructured, error) { yamlFile, err := ioutil.ReadFile(yamlPath) @@ -65,18 +89,14 @@ func yamlToUnstructured(yamlPath string) (*unstructured.Unstructured, error) { // createFromYAMLFile will take a path to a YAML file and create the resource. If it finds a // deployment, it will add the scorecard proxy as a container in the deployments podspec. func createFromYAMLFile(yamlPath string) error { - yamlFile, err := ioutil.ReadFile(yamlPath) + yamlSpecs, err := ioutil.ReadFile(yamlPath) if err != nil { return fmt.Errorf("failed to read file %s: %v", yamlPath, err) } - yamlSplit := bytes.Split(yamlFile, []byte("\n---\n")) - for _, yamlSpec := range yamlSplit { - // some autogenerated files may include an extra `---` at the end of the file - if string(yamlSpec) == "" { - continue - } + scanner := yamlutil.NewYAMLScanner(yamlSpecs) + for scanner.Scan() { obj := &unstructured.Unstructured{} - jsonSpec, err := yaml.YAMLToJSON(yamlSpec) + jsonSpec, err := yaml.YAMLToJSON(scanner.Bytes()) if err != nil { return fmt.Errorf("could not convert yaml file to json: %v", err) } @@ -130,10 +150,55 @@ func createFromYAMLFile(yamlPath string) error { } } addResourceCleanup(obj, types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()}) + if obj.GetKind() == "Deployment" { + proxyPodGlobal, err = getPodFromDeployment(deploymentName, viper.GetString(NamespaceOpt)) + if err != nil { + return err + } + } + } + if err := scanner.Err(); err != nil { + return fmt.Errorf("failed to scan %s: (%v)", yamlPath, err) } + return nil } +// getPodFromDeployment returns a deployment depName's pod in namespace. +func getPodFromDeployment(depName, namespace string) (pod *v1.Pod, err error) { + dep := &appsv1.Deployment{} + err = runtimeClient.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: depName}, dep) + if err != nil { + return nil, fmt.Errorf("failed to get newly created deployment: %v", err) + } + set := labels.Set(dep.Spec.Selector.MatchLabels) + // In some cases, the pod from the old deployment will be picked up + // instead of the new one. + err = wait.PollImmediate(time.Second*1, time.Second*60, func() (bool, error) { + pods := &v1.PodList{} + err = runtimeClient.List(context.TODO(), &client.ListOptions{LabelSelector: set.AsSelector()}, pods) + if err != nil { + return false, fmt.Errorf("failed to get list of pods in deployment: %v", err) + } + // Make sure the pods exist. There should only be 1 pod per deployment. 
+ if len(pods.Items) == 1 { + // If the pod has a deletion timestamp, it is the old pod; wait for + // pod with no deletion timestamp + if pods.Items[0].GetDeletionTimestamp() == nil { + pod = &pods.Items[0] + return true, nil + } + } else { + log.Debug("Operator deployment has more than 1 pod") + } + return false, nil + }) + if err != nil { + return nil, fmt.Errorf("failed to get proxyPod: %s", err) + } + return pod, nil +} + // createKubeconfigSecret creates the secret that will be mounted in the operator's container and contains // the kubeconfig for communicating with the proxy func createKubeconfigSecret() error { @@ -215,7 +280,7 @@ func addProxyContainer(dep *appsv1.Deployment) { pullPolicy = v1.PullAlways } dep.Spec.Template.Spec.Containers = append(dep.Spec.Template.Spec.Containers, v1.Container{ - Name: "scorecard-proxy", + Name: scorecardContainerName, Image: viper.GetString(ProxyImageOpt), ImagePullPolicy: pullPolicy, Command: []string{"scorecard-proxy"}, @@ -226,24 +291,6 @@ func addProxyContainer(dep *appsv1.Deployment) { }) } -// unstructuredToCRD converts an unstructured object to a CRD -func unstructuredToCRD(obj *unstructured.Unstructured) (*apiextv1beta1.CustomResourceDefinition, error) { - jsonByte, err := obj.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("failed to convert CRD to json: %v", err) - } - crdObj, _, err := dynamicDecoder.Decode(jsonByte, nil, nil) - if err != nil { - return nil, fmt.Errorf("failed to decode CRD object: %v", err) - } - switch o := crdObj.(type) { - case *apiextv1beta1.CustomResourceDefinition: - return o, nil - default: - return nil, fmt.Errorf("conversion of runtime object to CRD failed (resulting runtime object not CRD type)") - } -} - // unstructuredToDeployment converts an unstructured object to a deployment func unstructuredToDeployment(obj *unstructured.Unstructured) (*appsv1.Deployment, error) { jsonByte, err := obj.MarshalJSON() @@ -317,3 +364,24 @@ func addResourceCleanup(obj runtime.Object, key types.NamespacedName) { return nil }) } + +func getProxyLogs(proxyPod *v1.Pod) (string, error) { + // need a standard kubeclient for pod logs + kubeclient, err := kubernetes.NewForConfig(kubeconfig) + if err != nil { + return "", fmt.Errorf("failed to create kubeclient: %v", err) + } + logOpts := &v1.PodLogOptions{Container: scorecardContainerName} + req := kubeclient.CoreV1().Pods(proxyPod.GetNamespace()).GetLogs(proxyPod.GetName(), logOpts) + readCloser, err := req.Stream() + if err != nil { + return "", fmt.Errorf("failed to get logs: %v", err) + } + defer readCloser.Close() + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(readCloser) + if err != nil { + return "", fmt.Errorf("test failed and failed to read pod logs: %v", err) + } + return buf.String(), nil +} diff --git a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/scorecard.go b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/scorecard.go index 9c40fb298a..82e88af502 100644 --- a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/scorecard.go +++ b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/scorecard.go @@ -15,17 +15,21 @@ package scorecard import ( + "context" + "encoding/json" "errors" "fmt" "io/ioutil" "os" - "github.com/operator-framework/operator-sdk/internal/util/projutil" - k8sInternal "github.com/operator-framework/operator-sdk/internal/util/k8sutil" + 
"github.com/operator-framework/operator-sdk/internal/util/projutil" "github.com/operator-framework/operator-sdk/internal/util/yamlutil" + "github.com/operator-framework/operator-sdk/pkg/scaffold" + "github.com/ghodss/yaml" olmapiv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1" + olminstall "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -46,6 +50,7 @@ const ( NamespaceOpt = "namespace" KubeconfigOpt = "kubeconfig" InitTimeoutOpt = "init-timeout" + OlmDeployedOpt = "olm-deployed" CSVPathOpt = "csv-path" BasicTestsOpt = "basic-tests" OLMTestsOpt = "olm-tests" @@ -65,87 +70,43 @@ const ( goodTenant = "Good Tenant" ) -// TODO: add point weights to tests -type scorecardTest struct { - testType string - name string - description string - earnedPoints int - maximumPoints int -} - -type cleanupFn func() error - var ( kubeconfig *rest.Config - scTests []scorecardTest - scSuggestions []string dynamicDecoder runtime.Decoder runtimeClient client.Client restMapper *restmapper.DeferredDiscoveryRESTMapper deploymentName string - proxyPod *v1.Pod + proxyPodGlobal *v1.Pod cleanupFns []cleanupFn ScorecardConf string ) -const scorecardPodName = "operator-scorecard-test" +const ( + scorecardPodName = "operator-scorecard-test" + scorecardContainerName = "scorecard-proxy" +) func ScorecardTests(cmd *cobra.Command, args []string) error { - err := initConfig() - if err != nil { + if err := initConfig(); err != nil { return err } - if viper.GetString(CRManifestOpt) == "" { - return errors.New("cr-manifest config option missing") - } - if !viper.GetBool(BasicTestsOpt) && !viper.GetBool(OLMTestsOpt) { - return errors.New("at least one test type is required") - } - if viper.GetBool(OLMTestsOpt) && viper.GetString(CSVPathOpt) == "" { - return fmt.Errorf("if olm-tests is enabled, the --csv-path flag must be set") - } - pullPolicy := viper.GetString(ProxyPullPolicyOpt) - if pullPolicy != "Always" && pullPolicy != "Never" && pullPolicy != "PullIfNotPresent" { - return fmt.Errorf("invalid proxy pull policy: (%s); valid values: Always, Never, PullIfNotPresent", pullPolicy) + if err := validateScorecardFlags(); err != nil { + return err } cmd.SilenceUsage = true if viper.GetBool(VerboseOpt) { log.SetLevel(log.DebugLevel) } - // if no namespaced manifest path is given, combine deploy/service_account.yaml, deploy/role.yaml, deploy/role_binding.yaml and deploy/operator.yaml - if viper.GetString(NamespacedManifestOpt) == "" { - file, err := yamlutil.GenerateCombinedNamespacedManifest() - if err != nil { - return err - } - viper.Set(NamespacedManifestOpt, file.Name()) - defer func() { - err := os.Remove(viper.GetString(NamespacedManifestOpt)) - if err != nil { - log.Errorf("Could not delete temporary namespace manifest file: (%v)", err) - } - }() - } - if viper.GetString(GlobalManifestOpt) == "" { - file, err := yamlutil.GenerateCombinedGlobalManifest() - if err != nil { - return err - } - viper.Set(GlobalManifestOpt, file.Name()) - defer func() { - err := os.Remove(viper.GetString(GlobalManifestOpt)) - if err != nil { - log.Errorf("Could not delete global manifest file: (%v)", err) - } - }() - } defer func() { if err := cleanupScorecard(); err != nil { log.Errorf("Failed to clenup resources: (%v)", err) } }() - var tmpNamespaceVar string + + var ( + tmpNamespaceVar string + err error + ) kubeconfig, tmpNamespaceVar, err = 
k8sInternal.GetKubeconfigAndNamespace(viper.GetString(KubeconfigOpt)) if err != nil { return fmt.Errorf("failed to build the kubeconfig: %v", err) @@ -177,112 +138,169 @@ func ScorecardTests(cmd *cobra.Command, args []string) error { restMapper = restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoveryClient) restMapper.Reset() runtimeClient, _ = client.New(kubeconfig, client.Options{Scheme: scheme, Mapper: restMapper}) - if err := createFromYAMLFile(viper.GetString(GlobalManifestOpt)); err != nil { - return fmt.Errorf("failed to create global resources: %v", err) - } - if err := createFromYAMLFile(viper.GetString(NamespacedManifestOpt)); err != nil { - return fmt.Errorf("failed to create namespaced resources: %v", err) - } - if err := createFromYAMLFile(viper.GetString(CRManifestOpt)); err != nil { - return fmt.Errorf("failed to create cr resource: %v", err) - } - obj, err := yamlToUnstructured(viper.GetString(CRManifestOpt)) - if err != nil { - return fmt.Errorf("failed to decode custom resource manifest into object: %s", err) - } - if viper.GetBool(BasicTestsOpt) { - fmt.Println("Checking for existence of spec and status blocks in CR") - err = checkSpecAndStat(runtimeClient, obj, false) + + csv := &olmapiv1alpha1.ClusterServiceVersion{} + if viper.GetBool(OLMTestsOpt) { + yamlSpec, err := ioutil.ReadFile(viper.GetString(CSVPathOpt)) if err != nil { - return err + return fmt.Errorf("failed to read csv: %v", err) } - // This test is far too inconsistent and unreliable to be meaningful, - // so it has been disabled - /* - fmt.Println("Checking that operator actions are reflected in status") - err = checkStatusUpdate(runtimeClient, obj) - if err != nil { - return err - } - */ - fmt.Println("Checking that writing into CRs has an effect") - logs, err := writingIntoCRsHasEffect(obj) - if err != nil { - return err + if err = yaml.Unmarshal(yamlSpec, csv); err != nil { + return fmt.Errorf("error getting ClusterServiceVersion: %v", err) } - log.Debugf("Scorecard Proxy Logs: %v\n", logs) - } else { - // checkSpecAndStat is used to make sure the operator is ready in this case - // the boolean argument set at the end tells the function not to add the result to scTests - err = checkSpecAndStat(runtimeClient, obj, true) + } + + // Extract operator manifests from the CSV if olm-deployed is set. + if viper.GetBool(OlmDeployedOpt) { + // Get deploymentName from the deployment manifest within the CSV. + strat, err := (&olminstall.StrategyResolver{}).UnmarshalStrategy(csv.Spec.InstallStrategy) if err != nil { return err } - } - if viper.GetBool(OLMTestsOpt) { - yamlSpec, err := ioutil.ReadFile(viper.GetString(CSVPathOpt)) - if err != nil { - return fmt.Errorf("failed to read csv: %v", err) + stratDep, ok := strat.(*olminstall.StrategyDetailsDeployment) + if !ok { + return fmt.Errorf("expected StrategyDetailsDeployment, got strategy of type %T", strat) } - rawCSV, _, err := dynamicDecoder.Decode(yamlSpec, nil, nil) + deploymentName = stratDep.DeploymentSpecs[0].Name + // Get the proxy pod, which should have been created with the CSV. + proxyPodGlobal, err = getPodFromDeployment(deploymentName, viper.GetString(NamespaceOpt)) if err != nil { return err } - csv := &olmapiv1alpha1.ClusterServiceVersion{} - switch o := rawCSV.(type) { - case *olmapiv1alpha1.ClusterServiceVersion: - csv = o - default: - return fmt.Errorf("provided yaml file not of ClusterServiceVersion type") + + // Create a temporary CR manifest from metadata if one is not provided. 
+ crJSONStr, ok := csv.ObjectMeta.Annotations["alm-examples"] + if ok && viper.GetString(CRManifestOpt) == "" { + var crs []interface{} + if err = json.Unmarshal([]byte(crJSONStr), &crs); err != nil { + return err + } + // TODO: run scorecard against all CR's in CSV. + cr := crs[0] + crJSONBytes, err := json.Marshal(cr) + if err != nil { + return err + } + crYAMLBytes, err := yaml.JSONToYAML(crJSONBytes) + if err != nil { + return err + } + crFile, err := ioutil.TempFile("", "cr.yaml") + if err != nil { + return err + } + if _, err := crFile.Write(crYAMLBytes); err != nil { + return err + } + viper.Set(CRManifestOpt, crFile.Name()) + defer func() { + err := os.Remove(viper.GetString(CRManifestOpt)) + if err != nil { + log.Errorf("Could not delete temporary CR manifest file: (%v)", err) + } + }() } - fmt.Println("Checking if all CRDs have validation") - if err := crdsHaveValidation(viper.GetString(CRDsDirOpt), runtimeClient, obj); err != nil { - return err + + } else { + // If no namespaced manifest path is given, combine + // deploy/{service_account,role.yaml,role_binding,operator}.yaml. + if viper.GetString(NamespacedManifestOpt) == "" { + file, err := yamlutil.GenerateCombinedNamespacedManifest(scaffold.DeployDir) + if err != nil { + return err + } + viper.Set(NamespacedManifestOpt, file.Name()) + defer func() { + err := os.Remove(viper.GetString(NamespacedManifestOpt)) + if err != nil { + log.Errorf("Could not delete temporary namespace manifest file: (%v)", err) + } + }() } - fmt.Println("Checking for CRD resources") - crdsHaveResources(csv) - fmt.Println("Checking for existence of example CRs") - annotationsContainExamples(csv) - fmt.Println("Checking spec descriptors") - err = specDescriptors(csv, runtimeClient, obj) - if err != nil { - return err + // If no global manifest is given, combine all CRD's in the given CRD's dir. + if viper.GetString(GlobalManifestOpt) == "" { + gMan, err := yamlutil.GenerateCombinedGlobalManifest(viper.GetString(CRDsDirOpt)) + if err != nil { + return err + } + viper.Set(GlobalManifestOpt, gMan.Name()) + defer func() { + err := os.Remove(viper.GetString(GlobalManifestOpt)) + if err != nil { + log.Errorf("Could not delete global manifest file: (%v)", err) + } + }() } - fmt.Println("Checking status descriptors") - err = statusDescriptors(csv, runtimeClient, obj) - if err != nil { - return err + if err := createFromYAMLFile(viper.GetString(GlobalManifestOpt)); err != nil { + return fmt.Errorf("failed to create global resources: %v", err) + } + if err := createFromYAMLFile(viper.GetString(NamespacedManifestOpt)); err != nil { + return fmt.Errorf("failed to create namespaced resources: %v", err) } } - var totalEarned, totalMax int - var enabledTestTypes []string + + if err := createFromYAMLFile(viper.GetString(CRManifestOpt)); err != nil { + return fmt.Errorf("failed to create cr resource: %v", err) + } + obj, err := yamlToUnstructured(viper.GetString(CRManifestOpt)) + if err != nil { + return fmt.Errorf("failed to decode custom resource manifest into object: %s", err) + } + if err := waitUntilCRStatusExists(obj); err != nil { + return fmt.Errorf("failed waiting to check if CR status exists: %v", err) + } + var suites []*TestSuite + + // Run tests. 
if viper.GetBool(BasicTestsOpt) { - enabledTestTypes = append(enabledTestTypes, basicOperator) + conf := BasicTestConfig{ + Client: runtimeClient, + CR: obj, + ProxyPod: proxyPodGlobal, + } + basicTests := NewBasicTestSuite(conf) + basicTests.Run(context.TODO()) + suites = append(suites, basicTests) } if viper.GetBool(OLMTestsOpt) { - enabledTestTypes = append(enabledTestTypes, olmIntegration) + conf := OLMTestConfig{ + Client: runtimeClient, + CR: obj, + CSV: csv, + CRDsDir: viper.GetString(CRDsDirOpt), + ProxyPod: proxyPodGlobal, + } + olmTests := NewOLMTestSuite(conf) + olmTests.Run(context.TODO()) + suites = append(suites, olmTests) } - if viper.GetBool(TenantTestsOpt) { - enabledTestTypes = append(enabledTestTypes, goodTenant) + totalScore := 0.0 + for _, suite := range suites { + fmt.Printf("%s:\n", suite.GetName()) + for _, result := range suite.TestResults { + fmt.Printf("\t%s: %d/%d\n", result.Test.GetName(), result.EarnedPoints, result.MaximumPoints) + } + totalScore += float64(suite.TotalScore()) } - for _, testType := range enabledTestTypes { - fmt.Printf("%s:\n", testType) - for _, test := range scTests { - if test.testType == testType { - if !(test.earnedPoints == 0 && test.maximumPoints == 0) { - fmt.Printf("\t%s: %d/%d points\n", test.name, test.earnedPoints, test.maximumPoints) - } else { - fmt.Printf("\t%s: N/A (depends on an earlier test that failed)\n", test.name) - } - totalEarned += test.earnedPoints - totalMax += test.maximumPoints + totalScore = totalScore / float64(len(suites)) + fmt.Printf("\nTotal Score: %.0f%%\n", totalScore) + // Print suggestions + for _, suite := range suites { + for _, result := range suite.TestResults { + for _, suggestion := range result.Suggestions { + // 33 is yellow (specifically, the same shade of yellow that logrus uses for warnings) + fmt.Printf("\x1b[%dmSUGGESTION:\x1b[0m %s\n", 33, suggestion) } } } - fmt.Printf("\nTotal Score: %d/%d points\n", totalEarned, totalMax) - for _, suggestion := range scSuggestions { - // 33 is yellow (specifically, the same shade of yellow that logrus uses for warnings) - fmt.Printf("\x1b[%dmSUGGESTION:\x1b[0m %s\n", 33, suggestion) + // Print errors + for _, suite := range suites { + for _, result := range suite.TestResults { + for _, err := range result.Errors { + // 31 is red (specifically, the same shade of red that logrus uses for errors) + fmt.Printf("\x1b[%dmERROR:\x1b[0m %s\n", 31, err) + } + } } return nil } @@ -304,3 +322,23 @@ func initConfig() error { } return nil } + +func validateScorecardFlags() error { + if !viper.GetBool(OlmDeployedOpt) && viper.GetString(CRManifestOpt) == "" { + return errors.New("cr-manifest config option must be set") + } + if !viper.GetBool(BasicTestsOpt) && !viper.GetBool(OLMTestsOpt) { + return errors.New("at least one test type must be set") + } + if viper.GetBool(OLMTestsOpt) && viper.GetString(CSVPathOpt) == "" { + return fmt.Errorf("csv-path must be set if olm-tests is enabled") + } + if viper.GetBool(OlmDeployedOpt) && viper.GetString(CSVPathOpt) == "" { + return fmt.Errorf("csv-path must be set if olm-deployed is enabled") + } + pullPolicy := viper.GetString(ProxyPullPolicyOpt) + if pullPolicy != "Always" && pullPolicy != "Never" && pullPolicy != "PullIfNotPresent" { + return fmt.Errorf("invalid proxy pull policy: (%s); valid values: Always, Never, PullIfNotPresent", pullPolicy) + } + return nil +} diff --git a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/test_definitions.go 
b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/test_definitions.go new file mode 100644 index 0000000000..216e1a6752 --- /dev/null +++ b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/scorecard/test_definitions.go @@ -0,0 +1,324 @@ +// Copyright 2019 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scorecard + +import ( + "context" + + olmapiv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Type Definitions + +// Test provides methods for running scorecard tests +type Test interface { + GetName() string + GetDescription() string + IsCumulative() bool + Run(context.Context) *TestResult +} + +// TestResult contains a test's points, suggestions, and errors +type TestResult struct { + Test Test + EarnedPoints int + MaximumPoints int + Suggestions []string + Errors []error +} + +// TestInfo contains information about the scorecard test +type TestInfo struct { + Name string + Description string + // If a test is set to cumulative, the scores of multiple runs of the same test on separate CRs are added together for the total score. + // If cumulative is false, if any test failed, the total score is 0/1. Otherwise 1/1. 
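An aside on this Cumulative flag: it only matters once a test is run against several CRs, which is what the ResultsPassFail and ResultsCumulative helpers further down implement, and TotalScore then scales each test's earned/maximum ratio by its suite weight. A rough sketch of both aggregation rules and of the weighting, with hypothetical result values (the weights 1.5/1/1 are the ones NewBasicTestSuite assigns below):

package main

import "fmt"

type result struct{ earned, max int }

// passFail mirrors ResultsPassFail: any partial failure collapses the score to 0/1.
func passFail(rs []result) (int, int) {
	for _, r := range rs {
		if r.earned != r.max {
			return 0, 1
		}
	}
	return 1, 1
}

// cumulative mirrors ResultsCumulative: points are simply summed across runs.
func cumulative(rs []result) (earned, max int) {
	for _, r := range rs {
		earned += r.earned
		max += r.max
	}
	return earned, max
}

func main() {
	rs := []result{{earned: 2, max: 3}, {earned: 1, max: 1}}
	fmt.Println(passFail(rs))   // 0 1
	fmt.Println(cumulative(rs)) // 3 4

	// TotalScore-style weighting: with the basic suite weights (1.5, 1, 1),
	// spec 1/1, status 0/1 and writing 1/1 give (1*1.5 + 0*1 + 1*1) * 100/3.5,
	// roughly a 71% suite score.
	score := (1.0*1.5 + 0.0*1 + 1.0*1) * (100 / 3.5)
	fmt.Printf("%.0f%%\n", score) // 71%
}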
+ Cumulative bool +} + +// GetName return the test name +func (i TestInfo) GetName() string { return i.Name } + +// GetDescription returns the test description +func (i TestInfo) GetDescription() string { return i.Description } + +// IsCumulative returns true if the test's scores are intended to be cumulative +func (i TestInfo) IsCumulative() bool { return i.Cumulative } + +// BasicTestConfig contains all variables required by the BasicTest TestSuite +type BasicTestConfig struct { + Client client.Client + CR *unstructured.Unstructured + ProxyPod *v1.Pod +} + +// OLMTestConfig contains all variables required by the OLMTest TestSuite +type OLMTestConfig struct { + Client client.Client + CR *unstructured.Unstructured + CSV *olmapiv1alpha1.ClusterServiceVersion + CRDsDir string + ProxyPod *v1.Pod +} + +// TestSuite contains a list of tests and results, along with the relative weights of each test +type TestSuite struct { + TestInfo + Tests []Test + TestResults []*TestResult + Weights map[string]float64 +} + +// Test definitions + +// CheckSpecTest is a scorecard test that verifies that the CR has a spec block +type CheckSpecTest struct { + TestInfo + BasicTestConfig +} + +// NewCheckSpecTest returns a new CheckSpecTest object +func NewCheckSpecTest(conf BasicTestConfig) *CheckSpecTest { + return &CheckSpecTest{ + BasicTestConfig: conf, + TestInfo: TestInfo{ + Name: "Spec Block Exists", + Description: "Custom Resource has a Spec Block", + Cumulative: false, + }, + } +} + +// CheckStatusTest is a scorecard test that verifies that the CR has a status block +type CheckStatusTest struct { + TestInfo + BasicTestConfig +} + +// NewCheckStatusTest returns a new CheckStatusTest object +func NewCheckStatusTest(conf BasicTestConfig) *CheckStatusTest { + return &CheckStatusTest{ + BasicTestConfig: conf, + TestInfo: TestInfo{ + Name: "Status Block Exists", + Description: "Custom Resource has a Status Block", + Cumulative: false, + }, + } +} + +// WritingIntoCRsHasEffectTest is a scorecard test that verifies that the operator is making PUT and/or POST requests to the API server +type WritingIntoCRsHasEffectTest struct { + TestInfo + BasicTestConfig +} + +// NewWritingIntoCRsHasEffectTest returns a new WritingIntoCRsHasEffectTest object +func NewWritingIntoCRsHasEffectTest(conf BasicTestConfig) *WritingIntoCRsHasEffectTest { + return &WritingIntoCRsHasEffectTest{ + BasicTestConfig: conf, + TestInfo: TestInfo{ + Name: "Writing into CRs has an effect", + Description: "A CR sends PUT/POST requests to the API server to modify resources in response to spec block changes", + Cumulative: false, + }, + } +} + +// CRDsHaveValidationTest is a scorecard test that verifies that all CRDs have a validation section +type CRDsHaveValidationTest struct { + TestInfo + OLMTestConfig +} + +// NewCRDsHaveValidationTest returns a new CRDsHaveValidationTest object +func NewCRDsHaveValidationTest(conf OLMTestConfig) *CRDsHaveValidationTest { + return &CRDsHaveValidationTest{ + OLMTestConfig: conf, + TestInfo: TestInfo{ + Name: "Provided APIs have validation", + Description: "All CRDs have an OpenAPI validation subsection", + Cumulative: true, + }, + } +} + +// CRDsHaveResourcesTest is a scorecard test that verifies that the CSV lists used resources in its owned CRDs secyion +type CRDsHaveResourcesTest struct { + TestInfo + OLMTestConfig +} + +// NewCRDsHaveResourcesTest returns a new CRDsHaveResourcesTest object +func NewCRDsHaveResourcesTest(conf OLMTestConfig) *CRDsHaveResourcesTest { + return &CRDsHaveResourcesTest{ + 
OLMTestConfig: conf, + TestInfo: TestInfo{ + Name: "Owned CRDs have resources listed", + Description: "All Owned CRDs contain a resources subsection", + Cumulative: true, + }, + } +} + +// AnnotationsContainExamplesTest is a scorecard test that verifies that the CSV contains examples via the alm-examples annotation +type AnnotationsContainExamplesTest struct { + TestInfo + OLMTestConfig +} + +// NewAnnotationsContainExamplesTest returns a new AnnotationsContainExamplesTest object +func NewAnnotationsContainExamplesTest(conf OLMTestConfig) *AnnotationsContainExamplesTest { + return &AnnotationsContainExamplesTest{ + OLMTestConfig: conf, + TestInfo: TestInfo{ + Name: "CRs have at least 1 example", + Description: "The CSV's metadata contains an alm-examples section", + Cumulative: true, + }, + } +} + +// SpecDescriptorsTest is a scorecard test that verifies that all spec fields have descriptors +type SpecDescriptorsTest struct { + TestInfo + OLMTestConfig +} + +// NewSpecDescriptorsTest returns a new SpecDescriptorsTest object +func NewSpecDescriptorsTest(conf OLMTestConfig) *SpecDescriptorsTest { + return &SpecDescriptorsTest{ + OLMTestConfig: conf, + TestInfo: TestInfo{ + Name: "Spec fields with descriptors", + Description: "All spec fields have matching descriptors in the CSV", + Cumulative: true, + }, + } +} + +// StatusDescriptorsTest is a scorecard test that verifies that all status fields have descriptors +type StatusDescriptorsTest struct { + TestInfo + OLMTestConfig +} + +// NewStatusDescriptorsTest returns a new StatusDescriptorsTest object +func NewStatusDescriptorsTest(conf OLMTestConfig) *StatusDescriptorsTest { + return &StatusDescriptorsTest{ + OLMTestConfig: conf, + TestInfo: TestInfo{ + Name: "Status fields with descriptors", + Description: "All status fields have matching descriptors in the CSV", + Cumulative: true, + }, + } +} + +// Test Suite Declarations + +// NewBasicTestSuite returns a new TestSuite object containing basic, functional operator tests +func NewBasicTestSuite(conf BasicTestConfig) *TestSuite { + ts := NewTestSuite( + "Basic Tests", + "Test suite that runs basic, functional operator tests", + ) + ts.AddTest(NewCheckSpecTest(conf), 1.5) + ts.AddTest(NewCheckStatusTest(conf), 1) + ts.AddTest(NewWritingIntoCRsHasEffectTest(conf), 1) + + return ts +} + +// NewOLMTestSuite returns a new TestSuite object containing CSV best practice checks +func NewOLMTestSuite(conf OLMTestConfig) *TestSuite { + ts := NewTestSuite( + "OLM Tests", + "Test suite checks if an operator's CSV follows best practices", + ) + + ts.AddTest(NewCRDsHaveValidationTest(conf), 1.25) + ts.AddTest(NewCRDsHaveResourcesTest(conf), 1) + ts.AddTest(NewAnnotationsContainExamplesTest(conf), 1) + ts.AddTest(NewSpecDescriptorsTest(conf), 1) + ts.AddTest(NewStatusDescriptorsTest(conf), 1) + + return ts +} + +// Helper functions + +// ResultsPassFail will be used when multiple CRs are supported +func ResultsPassFail(results []TestResult) (earned, max int) { + for _, result := range results { + if result.EarnedPoints != result.MaximumPoints { + return 0, 1 + } + } + return 1, 1 +} + +// ResultsCumulative will be used when multiple CRs are supported +func ResultsCumulative(results []TestResult) (earned, max int) { + for _, result := range results { + earned += result.EarnedPoints + max += result.MaximumPoints + } + return earned, max +} + +// AddTest adds a new Test to a TestSuite along with a relative weight for the new Test +func (ts *TestSuite) AddTest(t Test, weight float64) { + ts.Tests = 
append(ts.Tests, t) + ts.Weights[t.GetName()] = weight +} + +// TotalScore calculates and returns the total score of all run Tests in a TestSuite +func (ts *TestSuite) TotalScore() (score int) { + floatScore := 0.0 + for _, result := range ts.TestResults { + if result.MaximumPoints != 0 { + floatScore += (float64(result.EarnedPoints) / float64(result.MaximumPoints)) * ts.Weights[result.Test.GetName()] + } + } + // scale to a percentage + addedWeights := 0.0 + for _, weight := range ts.Weights { + addedWeights += weight + } + floatScore = floatScore * (100 / addedWeights) + return int(floatScore) +} + +// Run runs all Tests in a TestSuite +func (ts *TestSuite) Run(ctx context.Context) { + for _, test := range ts.Tests { + ts.TestResults = append(ts.TestResults, test.Run(ctx)) + } +} + +// NewTestSuite returns a new TestSuite with a given name and description +func NewTestSuite(name, description string) *TestSuite { + return &TestSuite{ + TestInfo: TestInfo{ + Name: name, + Description: description, + }, + Weights: make(map[string]float64), + } +} diff --git a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/test/cluster.go b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/test/cluster.go index 0d5da2ef38..e01a0010ae 100755 --- a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/test/cluster.go +++ b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/test/cluster.go @@ -24,7 +24,6 @@ import ( k8sInternal "github.com/operator-framework/operator-sdk/internal/util/k8sutil" "github.com/operator-framework/operator-sdk/internal/util/projutil" "github.com/operator-framework/operator-sdk/pkg/k8sutil" - "github.com/operator-framework/operator-sdk/pkg/leader" "github.com/operator-framework/operator-sdk/pkg/scaffold" "github.com/operator-framework/operator-sdk/pkg/scaffold/ansible" "github.com/operator-framework/operator-sdk/pkg/test" @@ -113,7 +112,7 @@ func testClusterFunc(cmd *cobra.Command, args []string) error { Name: k8sutil.OperatorNameEnvVar, Value: "test-operator", }, { - Name: leader.PodNameEnv, + Name: k8sutil.PodNameEnvVar, ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"}}, }}, }}, diff --git a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/test/local.go b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/test/local.go index f2277a37ed..5577e12450 100644 --- a/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/test/local.go +++ b/vendor/github.com/operator-framework/operator-sdk/commands/operator-sdk/cmd/test/local.go @@ -122,11 +122,29 @@ func testLocalGoFunc(cmd *cobra.Command, args []string) error { // if no namespaced manifest path is given, combine deploy/service_account.yaml, deploy/role.yaml, deploy/role_binding.yaml and deploy/operator.yaml if tlConfig.namespacedManPath == "" && !tlConfig.noSetup { - file, err := yamlutil.GenerateCombinedNamespacedManifest() - if err != nil { - return err + if !tlConfig.upLocal { + file, err := yamlutil.GenerateCombinedNamespacedManifest(scaffold.DeployDir) + if err != nil { + return err + } + tlConfig.namespacedManPath = file.Name() + } else { + file, err := ioutil.TempFile("", "empty.yaml") + if err != nil { + return fmt.Errorf("could not create empty manifest file: (%v)", err) + } + tlConfig.namespacedManPath = file.Name() + emptyBytes := []byte{} + if err := file.Chmod(os.FileMode(fileutil.DefaultFileMode)); err != nil { + 
return fmt.Errorf("could not chown temporary namespaced manifest file: (%v)", err) + } + if _, err := file.Write(emptyBytes); err != nil { + return fmt.Errorf("could not write temporary namespaced manifest file: (%v)", err) + } + if err := file.Close(); err != nil { + return err + } } - tlConfig.namespacedManPath = file.Name() defer func() { err := os.Remove(tlConfig.namespacedManPath) if err != nil { @@ -135,7 +153,7 @@ func testLocalGoFunc(cmd *cobra.Command, args []string) error { }() } if tlConfig.globalManPath == "" && !tlConfig.noSetup { - file, err := yamlutil.GenerateCombinedGlobalManifest() + file, err := yamlutil.GenerateCombinedGlobalManifest(scaffold.CRDsDir) if err != nil { return err } diff --git a/vendor/github.com/operator-framework/operator-sdk/hack/image/ansible/scaffold-ansible-image.go b/vendor/github.com/operator-framework/operator-sdk/hack/image/ansible/scaffold-ansible-image.go index 3a76565ee5..9031c7477f 100644 --- a/vendor/github.com/operator-framework/operator-sdk/hack/image/ansible/scaffold-ansible-image.go +++ b/vendor/github.com/operator-framework/operator-sdk/hack/image/ansible/scaffold-ansible-image.go @@ -39,6 +39,7 @@ func main() { &ansible.Entrypoint{}, &ansible.UserSetup{}, &ansible.K8sStatus{}, + &ansible.AoLogs{}, ) if err != nil { log.Fatalf("Add scaffold failed: (%v)", err) diff --git a/vendor/github.com/operator-framework/operator-sdk/images/scorecard-proxy/cmd/proxy/main.go b/vendor/github.com/operator-framework/operator-sdk/images/scorecard-proxy/cmd/proxy/main.go index d3a4b6d01b..3e837945a3 100644 --- a/vendor/github.com/operator-framework/operator-sdk/images/scorecard-proxy/cmd/proxy/main.go +++ b/vendor/github.com/operator-framework/operator-sdk/images/scorecard-proxy/cmd/proxy/main.go @@ -60,7 +60,7 @@ func main() { KubeConfig: mgr.GetConfig(), RESTMapper: mgr.GetRESTMapper(), ControllerMap: cMap, - NoOwnerInjection: true, + OwnerInjection: false, LogRequests: true, WatchedNamespaces: []string{namespace}, DisableCache: true, diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/util/fileutil/file_util.go b/vendor/github.com/operator-framework/operator-sdk/internal/util/fileutil/file_util.go index 523047a4fb..f3258854a6 100644 --- a/vendor/github.com/operator-framework/operator-sdk/internal/util/fileutil/file_util.go +++ b/vendor/github.com/operator-framework/operator-sdk/internal/util/fileutil/file_util.go @@ -42,10 +42,6 @@ type FileWriter struct { once sync.Once } -func NewFileWriter() *FileWriter { - return NewFileWriterFS(afero.NewOsFs()) -} - func NewFileWriterFS(fs afero.Fs) *FileWriter { fw := &FileWriter{} fw.once.Do(func() { diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/util/k8sutil/crd.go b/vendor/github.com/operator-framework/operator-sdk/internal/util/k8sutil/crd.go new file mode 100644 index 0000000000..8b292ecb97 --- /dev/null +++ b/vendor/github.com/operator-framework/operator-sdk/internal/util/k8sutil/crd.go @@ -0,0 +1,48 @@ +package k8sutil + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + yaml "github.com/ghodss/yaml" + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" +) + +func GetCRDs(crdsDir string) ([]*apiextv1beta1.CustomResourceDefinition, error) { + manifests, err := GetCRDManifestPaths(crdsDir) + if err != nil { + return nil, fmt.Errorf("failed to get CRD's from %s: (%v)", crdsDir, err) + } + var crds []*apiextv1beta1.CustomResourceDefinition + for _, m := range manifests { + b, err := ioutil.ReadFile(m) + if err != nil 
{ + return nil, err + } + crd := &apiextv1beta1.CustomResourceDefinition{} + if err = yaml.Unmarshal(b, crd); err != nil { + return nil, err + } + crds = append(crds, crd) + } + return crds, nil +} + +func GetCRDManifestPaths(crdsDir string) (crdPaths []string, err error) { + err = filepath.Walk(crdsDir, func(path string, info os.FileInfo, werr error) error { + if werr != nil { + return werr + } + if info == nil { + return nil + } + if !info.IsDir() && strings.HasSuffix(path, "_crd.yaml") { + crdPaths = append(crdPaths, path) + } + return nil + }) + return crdPaths, err +} diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/util/k8sutil/object.go b/vendor/github.com/operator-framework/operator-sdk/internal/util/k8sutil/object.go new file mode 100644 index 0000000000..3080c54006 --- /dev/null +++ b/vendor/github.com/operator-framework/operator-sdk/internal/util/k8sutil/object.go @@ -0,0 +1,40 @@ +package k8sutil + +import ( + yaml "github.com/ghodss/yaml" + "k8s.io/apimachinery/pkg/runtime" +) + +// GetObjectBytes marshalls an object and removes runtime-managed fields: +// 'status', 'creationTimestamp' +func GetObjectBytes(obj interface{}) ([]byte, error) { + u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + deleteKeys := []string{"status", "creationTimestamp"} + for _, dk := range deleteKeys { + deleteKeyFromUnstructured(u, dk) + } + return yaml.Marshal(u) +} + +func deleteKeyFromUnstructured(u map[string]interface{}, key string) { + if _, ok := u[key]; ok { + delete(u, key) + return + } + + for _, v := range u { + switch t := v.(type) { + case map[string]interface{}: + deleteKeyFromUnstructured(t, key) + case []interface{}: + for _, ti := range t { + if m, ok := ti.(map[string]interface{}); ok { + deleteKeyFromUnstructured(m, key) + } + } + } + } +} diff --git a/vendor/github.com/operator-framework/operator-sdk/internal/util/yamlutil/manifest.go b/vendor/github.com/operator-framework/operator-sdk/internal/util/yamlutil/manifest.go index 2913c50331..116fc3606c 100644 --- a/vendor/github.com/operator-framework/operator-sdk/internal/util/yamlutil/manifest.go +++ b/vendor/github.com/operator-framework/operator-sdk/internal/util/yamlutil/manifest.go @@ -54,8 +54,8 @@ func CombineManifests(base []byte, manifests ...[]byte) []byte { } // GenerateCombinedNamespacedManifest creates a temporary manifest yaml -// containing all standard namespaced resource manifests combined into 1 file -func GenerateCombinedNamespacedManifest() (*os.File, error) { +// by combining all standard namespaced resource manifests in deployDir. 
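Returning briefly to GetObjectBytes above: it recursively strips the runtime-managed "status" and "creationTimestamp" keys before marshalling to YAML. A minimal usage sketch, with an illustrative Pod value; note the package is internal to the SDK, so this only compiles inside the operator-sdk module itself:

package main

import (
	"fmt"
	"log"

	"github.com/operator-framework/operator-sdk/internal/util/k8sutil"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pod := &corev1.Pod{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"},
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
		Status:     corev1.PodStatus{Phase: corev1.PodRunning},
	}
	// The emitted YAML contains no status or creationTimestamp keys,
	// even though both are set or defaulted on the typed object.
	b, err := k8sutil.GetObjectBytes(pod)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(b))
}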
+func GenerateCombinedNamespacedManifest(deployDir string) (*os.File, error) { file, err := ioutil.TempFile("", "namespaced-manifest.yaml") if err != nil { return nil, err @@ -66,19 +66,19 @@ func GenerateCombinedNamespacedManifest() (*os.File, error) { } }() - sa, err := ioutil.ReadFile(filepath.Join(scaffold.DeployDir, scaffold.ServiceAccountYamlFile)) + sa, err := ioutil.ReadFile(filepath.Join(deployDir, scaffold.ServiceAccountYamlFile)) if err != nil { log.Warnf("Could not find the serviceaccount manifest: (%v)", err) } - role, err := ioutil.ReadFile(filepath.Join(scaffold.DeployDir, scaffold.RoleYamlFile)) + role, err := ioutil.ReadFile(filepath.Join(deployDir, scaffold.RoleYamlFile)) if err != nil { log.Warnf("Could not find role manifest: (%v)", err) } - roleBinding, err := ioutil.ReadFile(filepath.Join(scaffold.DeployDir, scaffold.RoleBindingYamlFile)) + roleBinding, err := ioutil.ReadFile(filepath.Join(deployDir, scaffold.RoleBindingYamlFile)) if err != nil { log.Warnf("Could not find role_binding manifest: (%v)", err) } - operator, err := ioutil.ReadFile(filepath.Join(scaffold.DeployDir, scaffold.OperatorYamlFile)) + operator, err := ioutil.ReadFile(filepath.Join(deployDir, scaffold.OperatorYamlFile)) if err != nil { return nil, fmt.Errorf("could not find operator manifest: (%v)", err) } @@ -98,8 +98,8 @@ func GenerateCombinedNamespacedManifest() (*os.File, error) { } // GenerateCombinedGlobalManifest creates a temporary manifest yaml -// containing all standard global resource manifests combined into 1 file -func GenerateCombinedGlobalManifest() (*os.File, error) { +// by combining all standard global resource manifests in crdsDir. +func GenerateCombinedGlobalManifest(crdsDir string) (*os.File, error) { file, err := ioutil.TempFile("", "global-manifest.yaml") if err != nil { return nil, err @@ -110,16 +110,16 @@ func GenerateCombinedGlobalManifest() (*os.File, error) { } }() - files, err := ioutil.ReadDir(scaffold.CRDsDir) + files, err := ioutil.ReadDir(crdsDir) if err != nil { return nil, fmt.Errorf("could not read deploy directory: (%v)", err) } combined := []byte{} for _, file := range files { if strings.HasSuffix(file.Name(), "crd.yaml") { - fileBytes, err := ioutil.ReadFile(filepath.Join(scaffold.CRDsDir, file.Name())) + fileBytes, err := ioutil.ReadFile(filepath.Join(crdsDir, file.Name())) if err != nil { - return nil, fmt.Errorf("could not read file %s: (%v)", filepath.Join(scaffold.CRDsDir, file.Name()), err) + return nil, fmt.Errorf("could not read file %s: (%v)", filepath.Join(crdsDir, file.Name()), err) } combined = CombineManifests(combined, fileBytes) } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/controller/controller.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/controller/controller.go index 5e4de6f56c..02accab26b 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/controller/controller.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/controller/controller.go @@ -47,6 +47,7 @@ type Options struct { ManageStatus bool WatchDependentResources bool WatchClusterScopedResources bool + MaxWorkers int } // Add - Creates a new ansible operator controller and adds it to the manager @@ -82,7 +83,8 @@ func Add(mgr manager.Manager, options Options) *controller.Controller { //Create new controller runtime controller and set the controller to watch GVK. 
c, err := controller.New(fmt.Sprintf("%v-controller", strings.ToLower(options.GVK.Kind)), mgr, controller.Options{ - Reconciler: aor, + Reconciler: aor, + MaxConcurrentReconciles: options.MaxWorkers, }) if err != nil { log.Error(err, "") diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/controller/reconcile.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/controller/reconcile.go index e393491118..cd43b277c4 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/controller/reconcile.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/controller/reconcile.go @@ -18,6 +18,7 @@ import ( "context" "encoding/json" "errors" + "fmt" "math/rand" "os" "strconv" @@ -42,7 +43,7 @@ import ( ) const ( - // ReconcilePeriodAnnotation - annotation used by a user to specify the reconcilation interval for the CR. + // ReconcilePeriodAnnotation - annotation used by a user to specify the reconciliation interval for the CR. // To use create a CR with an annotation "ansible.operator-sdk/reconcile-period: 30s" or some other valid // Duration. This will override the operators/or controllers reconcile period for that particular CR. ReconcilePeriodAnnotation = "ansible.operator-sdk/reconcile-period" @@ -81,6 +82,9 @@ func (r *AnsibleOperatorReconciler) Reconcile(request reconcile.Request) (reconc if ds, ok := u.GetAnnotations()[ReconcilePeriodAnnotation]; ok { duration, err := time.ParseDuration(ds) if err != nil { + // Should attempt to update to a failed condition + r.markError(u, request.NamespacedName, fmt.Sprintf("Unable to parse reconcile period annotation: %v", err)) + logger.Error(err, "Unable to parse reconcile period annotation") return reconcileResult, err } reconcileResult.RequeueAfter = duration @@ -96,28 +100,30 @@ func (r *AnsibleOperatorReconciler) Reconcile(request reconcile.Request) (reconc u.SetFinalizers(finalizers) err := r.Client.Update(context.TODO(), u) if err != nil { + logger.Error(err, "Unable to update cr with finalizer") return reconcileResult, err } } if !contains(pendingFinalizers, finalizer) && deleted { - logger.Info("Resource is terminated, skipping reconcilation") + logger.Info("Resource is terminated, skipping reconciliation") return reconcile.Result{}, nil } spec := u.Object["spec"] _, ok := spec.(map[string]interface{}) + // Need to handle cases where there is no spec. + // We can add the spec to the object, which will allow + // everything to work, and will not get updated. + // Therefore we can now deal with the case of secrets and configmaps. 
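An aside on the reconcile-period annotation documented earlier in this hunk: the per-CR override is a plain duration string parsed with time.ParseDuration, and with this patch a parse failure is now also surfaced as a Failure condition on the CR rather than only returned as an error. A small sketch of the accepted values (the CR here is hypothetical):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// A hypothetical CR carrying the per-CR override annotation.
	u := &unstructured.Unstructured{Object: map[string]interface{}{}}
	u.SetAnnotations(map[string]string{
		"ansible.operator-sdk/reconcile-period": "30s",
	})

	if ds, ok := u.GetAnnotations()["ansible.operator-sdk/reconcile-period"]; ok {
		d, err := time.ParseDuration(ds)
		if err != nil {
			fmt.Println("invalid reconcile period:", err)
			return
		}
		fmt.Println("requeue after:", d) // requeue after: 30s
	}
}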
if !ok { logger.V(1).Info("Spec was not found") u.Object["spec"] = map[string]interface{}{} - err = r.Client.Update(context.TODO(), u) - if err != nil { - return reconcileResult, err - } } if r.ManageStatus { err = r.markRunning(u, request.NamespacedName) if err != nil { + logger.Error(err, "Unable to update the status to mark cr as running") return reconcileResult, err } } @@ -131,6 +137,8 @@ func (r *AnsibleOperatorReconciler) Reconcile(request reconcile.Request) (reconc kc, err := kubeconfig.Create(ownerRef, "http://localhost:8888", u.GetNamespace()) if err != nil { + r.markError(u, request.NamespacedName, "Unable to run reconciliation") + logger.Error(err, "Unable to generate kubeconfig") return reconcileResult, err } defer func() { @@ -140,6 +148,8 @@ func (r *AnsibleOperatorReconciler) Reconcile(request reconcile.Request) (reconc }() result, err := r.Runner.Run(ident, u, kc.Name()) if err != nil { + r.markError(u, request.NamespacedName, "Unable to run reconciliation") + logger.Error(err, "Unable to run ansible runner") return reconcileResult, err } @@ -161,7 +171,7 @@ func (r *AnsibleOperatorReconciler) Reconcile(request reconcile.Request) (reconc return reconcile.Result{}, err } } - if event.Event == eventapi.EventRunnerOnFailed { + if event.Event == eventapi.EventRunnerOnFailed && !event.IgnoreError() { failureMessages = append(failureMessages, event.GetFailedPlaybookMessage()) } } @@ -189,6 +199,7 @@ func (r *AnsibleOperatorReconciler) Reconcile(request reconcile.Request) (reconc u.SetFinalizers(finalizers) err := r.Client.Update(context.TODO(), u) if err != nil { + logger.Error(err, "Failed to remove finalizer") return reconcileResult, err } } @@ -237,6 +248,47 @@ func (r *AnsibleOperatorReconciler) markRunning(u *unstructured.Unstructured, na return nil } +// markError - used to alert the user to the issues during the validation of a reconcile run. +// i.e Annotations that could be incorrect +func (r *AnsibleOperatorReconciler) markError(u *unstructured.Unstructured, namespacedName types.NamespacedName, failureMessage string) error { + logger := logf.Log.WithName("markError") + // Get the latest resource to prevent updating a stale status + err := r.Client.Get(context.TODO(), namespacedName, u) + if apierrors.IsNotFound(err) { + logger.Info("Resource not found, assuming it was deleted", err) + return nil + } + if err != nil { + return err + } + statusInterface := u.Object["status"] + statusMap, ok := statusInterface.(map[string]interface{}) + // If the map is not available create one. + if !ok { + statusMap = map[string]interface{}{} + } + crStatus := ansiblestatus.CreateFromMap(statusMap) + + sc := ansiblestatus.GetCondition(crStatus, ansiblestatus.RunningConditionType) + if sc != nil { + sc.Status = v1.ConditionFalse + ansiblestatus.SetCondition(&crStatus, *sc) + } + + c := ansiblestatus.NewCondition( + ansiblestatus.FailureConditionType, + v1.ConditionTrue, + nil, + ansiblestatus.FailedReason, + failureMessage, + ) + ansiblestatus.SetCondition(&crStatus, *c) + // This needs the status subresource to be enabled by default. 
+ u.Object["status"] = crStatus.GetJSONMap() + + return r.Client.Status().Update(context.TODO(), u) +} + func (r *AnsibleOperatorReconciler) markDone(u *unstructured.Unstructured, namespacedName types.NamespacedName, statusEvent eventapi.StatusJobEvent, failureMessages eventapi.FailureMessages) error { logger := logf.Log.WithName("markDone") // Get the latest resource to prevent updating a stale status diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/flags/flag.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/flags/flag.go index 0830351ace..113f33956b 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/flags/flag.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/flags/flag.go @@ -15,6 +15,8 @@ package flags import ( + "strings" + "github.com/operator-framework/operator-sdk/pkg/internal/flags" "github.com/operator-framework/operator-sdk/pkg/log/zap" "github.com/spf13/pflag" @@ -23,6 +25,8 @@ import ( // AnsibleOperatorFlags - Options to be used by an ansible operator type AnsibleOperatorFlags struct { flags.WatchFlags + InjectOwnerRef bool + MaxWorkers int } // AddTo - Add the ansible operator flags to the the flagset @@ -31,5 +35,17 @@ func AddTo(flagSet *pflag.FlagSet, helpTextPrefix ...string) *AnsibleOperatorFla aof := &AnsibleOperatorFlags{} aof.WatchFlags.AddTo(flagSet, helpTextPrefix...) flagSet.AddFlagSet(zap.FlagSet()) + flagSet.BoolVar(&aof.InjectOwnerRef, + "inject-owner-ref", + true, + strings.Join(append(helpTextPrefix, "The ansible operator will inject owner references unless this flag is false"), " "), + ) + flagSet.IntVar(&aof.MaxWorkers, + "max-workers", + 1, + strings.Join(append(helpTextPrefix, + "Maximum number of workers to use. Overridden by environment variable."), + " "), + ) return aof } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/operator/operator.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/operator/operator.go index 5f550cf54a..9925e639b1 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/operator/operator.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/operator/operator.go @@ -16,9 +16,15 @@ package operator import ( "errors" + "fmt" "math/rand" + "os" + "strconv" + "strings" "time" + "k8s.io/apimachinery/pkg/runtime/schema" + "github.com/operator-framework/operator-sdk/pkg/ansible/controller" "github.com/operator-framework/operator-sdk/pkg/ansible/flags" "github.com/operator-framework/operator-sdk/pkg/ansible/proxy/controllermap" @@ -43,10 +49,20 @@ func Run(done chan error, mgr manager.Manager, f *flags.AnsibleOperatorFlags, cM c := signals.SetupSignalHandler() for gvk, runner := range watches { + + // if the WORKER_* environment variable is set, use that value. + // Otherwise, use the value from the CLI. This is definitely + // counter-intuitive but it allows the operator admin adjust the + // number of workers based on their cluster resources. While the + // author may use the CLI option to specify a suggested + // configuration for the operator. 
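For illustration, the WORKER_* variable name is derived from the watched GVK by the formatEnvVar helper added further down: the kind and group are joined, dots become underscores, and the result is uppercased. With a hypothetical Memcached kind in the cache.example.com group the lookup would be:

package main

import (
	"fmt"
	"strings"
)

func main() {
	kind, group := "Memcached", "cache.example.com" // hypothetical watched GVK
	envvar := fmt.Sprintf("WORKER_%s_%s", kind, group)
	envvar = strings.ToUpper(strings.Replace(envvar, ".", "_", -1))
	fmt.Println(envvar) // WORKER_MEMCACHED_CACHE_EXAMPLE_COM
	// Setting WORKER_MEMCACHED_CACHE_EXAMPLE_COM=4 on the operator pod would
	// override the --max-workers CLI value for that GVK.
}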
+ maxWorkers := getMaxWorkers(gvk, f.MaxWorkers) + o := controller.Options{ GVK: gvk, Runner: runner, ManageStatus: runner.GetManageStatus(), + MaxWorkers: maxWorkers, } applyFlagsToControllerOptions(f, &o) if d, ok := runner.GetReconcilePeriod(); ok { @@ -67,6 +83,24 @@ func Run(done chan error, mgr manager.Manager, f *flags.AnsibleOperatorFlags, cM done <- mgr.Start(c) } +func getMaxWorkers(gvk schema.GroupVersionKind, defvalue int) int { + envvar := formatEnvVar(gvk.Kind, gvk.Group) + maxWorkers, err := strconv.Atoi(os.Getenv(envvar)) + if err != nil { + // we don't care why we couldn't parse it just use one. + // maybe we should log that we are defaulting to 1. + logf.Log.WithName("manager").V(0).Info(fmt.Sprintf("Using default value for workers %d", defvalue)) + return defvalue + } + + return maxWorkers +} + +func formatEnvVar(kind string, group string) string { + envvar := fmt.Sprintf("WORKER_%s_%s", kind, group) + return strings.ToUpper(strings.Replace(envvar, ".", "_", -1)) +} + func applyFlagsToControllerOptions(f *flags.AnsibleOperatorFlags, o *controller.Options) { o.ReconcilePeriod = f.ReconcilePeriod } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/proxy.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/proxy.go index 299caf4650..f9d6b6ae89 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/proxy.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/proxy/proxy.go @@ -204,7 +204,7 @@ func CacheResponseHandler(h http.Handler, informerCache cache.Cache, restMapper } // InjectOwnerReferenceHandler will handle proxied requests and inject the -// owner refernece found in the authorization header. The Authorization is +// owner reference found in the authorization header. The Authorization is // then deleted so that the proxy can re-set with the correct authorization. func InjectOwnerReferenceHandler(h http.Handler, cMap *controllermap.ControllerMap, restMapper meta.RESTMapper, watchedNamespaces map[string]interface{}) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { @@ -278,7 +278,12 @@ func InjectOwnerReferenceHandler(h http.Handler, cMap *controllermap.ControllerM } } } - // Removing the authorization so that the proxy can set the correct authorization. 
+ h.ServeHTTP(w, req) + }) +} + +func removeAuthorizationHeader(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { req.Header.Del("Authorization") h.ServeHTTP(w, req) }) @@ -312,7 +317,7 @@ type Options struct { Address string Port int Handler HandlerChain - NoOwnerInjection bool + OwnerInjection bool LogRequests bool KubeConfig *rest.Config Cache cache.Cache @@ -369,14 +374,18 @@ func Run(done chan error, o Options) error { o.Cache = informerCache } - if !o.NoOwnerInjection { + server.Handler = removeAuthorizationHeader(server.Handler) + + if o.OwnerInjection { server.Handler = InjectOwnerReferenceHandler(server.Handler, o.ControllerMap, o.RESTMapper, watchedNamespaceMap) + } else { + log.Info("Warning: injection of owner references and dependent watches is turned off") } if o.LogRequests { server.Handler = RequestLogHandler(server.Handler) } if !o.DisableCache { - server.Handler = CacheResponseHandler(server.Handler, o.Cache, o.RESTMapper, watchedNamespaceMap, o.ControllerMap, !o.NoOwnerInjection) + server.Handler = CacheResponseHandler(server.Handler, o.Cache, o.RESTMapper, watchedNamespaceMap, o.ControllerMap, o.OwnerInjection) } l, err := server.Listen(o.Address, o.Port) diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/run.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/run.go index eb713314f2..c07fe9230d 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/run.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/run.go @@ -93,6 +93,7 @@ func Run(flags *aoflags.AnsibleOperatorFlags) error { Cache: mgr.GetCache(), RESTMapper: mgr.GetRESTMapper(), ControllerMap: cMap, + OwnerInjection: flags.InjectOwnerRef, WatchedNamespaces: []string{namespace}, }) if err != nil { diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/runner/eventapi/types.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/runner/eventapi/types.go index 3f0492547e..f8098da3e8 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/runner/eventapi/types.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/runner/eventapi/types.go @@ -110,3 +110,15 @@ func (je JobEvent) GetFailedPlaybookMessage() string { } return message } + +// IgnoreError - Does the job event contain the ignore_error ansible flag +func (je JobEvent) IgnoreError() bool { + ignoreErrors, ok := je.EventData["ignore_errors"] + if !ok { + return false + } + if b, ok := ignoreErrors.(bool); ok && b { + return b + } + return false +} diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/runner/internal/inputdir/inputdir.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/runner/internal/inputdir/inputdir.go index 1aa08a91ae..cca03b50b4 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/runner/internal/inputdir/inputdir.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/runner/internal/inputdir/inputdir.go @@ -132,7 +132,7 @@ func (i *InputDir) Write() error { return err } - // ANSIBLE_INVENTORY takes precendence over our generated hosts file + // ANSIBLE_INVENTORY takes precedence over our generated hosts file // so if the envvar is set we don't bother making it, we just copy // the inventory into our runner directory ansible_inventory := os.Getenv("ANSIBLE_INVENTORY") diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/runner/runner.go 
b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/runner/runner.go index 71f5746d55..5c3786638c 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/runner/runner.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ansible/runner/runner.go @@ -297,7 +297,21 @@ func (r *runner) Run(ident string, u *unstructured.Unstructured, kubeconfig stri if err != nil && err != http.ErrServerClosed { logger.Error(err, "Error from event API") } + + // link the current run to the `latest` directory under artifacts + currentRun := filepath.Join(inputDir.Path, "artifacts", ident) + latestArtifacts := filepath.Join(inputDir.Path, "artifacts", "latest") + if _, err = os.Lstat(latestArtifacts); err == nil { + if err = os.Remove(latestArtifacts); err != nil { + logger.Error(err, "Error removing the latest artifacts symlink") + } + } + if err = os.Symlink(currentRun, latestArtifacts); err != nil { + logger.Error(err, "Error symlinking latest artifacts") + } + }() + return &runResult{ events: receiver.Events, inputDir: &inputDir, diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/helm/run.go b/vendor/github.com/operator-framework/operator-sdk/pkg/helm/run.go index d9cf3fefe9..f110dc9e8b 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/helm/run.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/helm/run.go @@ -53,9 +53,13 @@ func Run(flags *hoflags.HelmOperatorFlags) error { namespace, found := os.LookupEnv(k8sutil.WatchNamespaceEnvVar) log = log.WithValues("Namespace", namespace) if found { - log.Info("Watching single namespace.") + if namespace == metav1.NamespaceAll { + log.Info("Watching all namespaces.") + } else { + log.Info("Watching single namespace.") + } } else { - log.Info(fmt.Sprintf("%v environment variable not set. This operator is watching all namespaces.", + log.Info(fmt.Sprintf("%v environment variable not set. Watching all namespaces.", k8sutil.WatchNamespaceEnvVar)) namespace = metav1.NamespaceAll } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/k8sutil/constants.go b/vendor/github.com/operator-framework/operator-sdk/pkg/k8sutil/constants.go index d1842a2845..a598cb5a6f 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/k8sutil/constants.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/k8sutil/constants.go @@ -25,6 +25,10 @@ const ( WatchNamespaceEnvVar = "WATCH_NAMESPACE" // OperatorNameEnvVar is the constant for env variable OPERATOR_NAME - // wich is the name of the current operator + // which is the name of the current operator OperatorNameEnvVar = "OPERATOR_NAME" + + // PodNameEnvVar is the constant for env variable POD_NAME + // which is the name of the current pod. 
+ PodNameEnvVar = "POD_NAME" ) diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/k8sutil/k8sutil.go b/vendor/github.com/operator-framework/operator-sdk/pkg/k8sutil/k8sutil.go index 6ff8533e3b..2272b42db0 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/k8sutil/k8sutil.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/k8sutil/k8sutil.go @@ -15,12 +15,16 @@ package k8sutil import ( + "context" "fmt" "io/ioutil" "os" "strings" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" discovery "k8s.io/client-go/discovery" + crclient "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" ) @@ -83,3 +87,32 @@ func ResourceExists(dc discovery.DiscoveryInterface, apiGroupVersion, kind strin } return false, nil } + +// GetPod returns a Pod object that corresponds to the pod in which the code +// is currently running. +// It expects the environment variable POD_NAME to be set by the downwards API. +func GetPod(ctx context.Context, client crclient.Client, ns string) (*corev1.Pod, error) { + podName := os.Getenv(PodNameEnvVar) + if podName == "" { + return nil, fmt.Errorf("required env %s not set, please configure downward API", PodNameEnvVar) + } + + log.V(1).Info("Found podname", "Pod.Name", podName) + + pod := &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Pod", + }, + } + key := crclient.ObjectKey{Namespace: ns, Name: podName} + err := client.Get(ctx, key, pod) + if err != nil { + log.Error(err, "Failed to get Pod", "Pod.Namespace", ns, "Pod.Name", podName) + return nil, err + } + + log.V(1).Info("Found Pod", "Pod.Namespace", ns, "Pod.Name", pod.Name) + + return pod, nil +} diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/leader/leader.go b/vendor/github.com/operator-framework/operator-sdk/pkg/leader/leader.go index 3e1e309b42..94fa444b09 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/leader/leader.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/leader/leader.go @@ -16,8 +16,6 @@ package leader import ( "context" - "fmt" - "os" "time" "github.com/operator-framework/operator-sdk/pkg/k8sutil" @@ -37,8 +35,6 @@ var log = logf.Log.WithName("leader") // attempts to become the leader. const maxBackoffInterval = time.Second * 16 -const PodNameEnv = "POD_NAME" - // Become ensures that the current pod is the leader within its namespace. If // run outside a cluster, it will skip leader election and return nil. It // continuously tries to create a ConfigMap with the provided name and the @@ -143,24 +139,8 @@ func Become(ctx context.Context, lockName string) error { // this code is currently running. 
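GetPod above relies on POD_NAME being injected by the downward API, which is the same env entry the test/cluster.go hunk earlier in this patch builds. A minimal Go sketch of that entry on its own (container and pod spec omitted):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// POD_NAME is filled in from the pod's own metadata.name via the downward API,
	// which is what GetPod (and, through it, leader election) reads at runtime.
	podNameEnv := corev1.EnvVar{
		Name: "POD_NAME",
		ValueFrom: &corev1.EnvVarSource{
			FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"},
		},
	}
	fmt.Printf("%+v\n", podNameEnv)
}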
// It expects the environment variable POD_NAME to be set by the downwards API func myOwnerRef(ctx context.Context, client crclient.Client, ns string) (*metav1.OwnerReference, error) { - podName := os.Getenv(PodNameEnv) - if podName == "" { - return nil, fmt.Errorf("required env %s not set, please configure downward API", PodNameEnv) - } - - log.V(1).Info("Found podname", "Pod.Name", podName) - - myPod := &corev1.Pod{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Pod", - }, - } - - key := crclient.ObjectKey{Namespace: ns, Name: podName} - err := client.Get(ctx, key, myPod) + myPod, err := k8sutil.GetPod(ctx, client, ns) if err != nil { - log.Error(err, "Failed to get pod", "Pod.Namespace", ns, "Pod.Name", podName) return nil, err } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/log/zap/flags.go b/vendor/github.com/operator-framework/operator-sdk/pkg/log/zap/flags.go index 67d0080edd..cba8183a0a 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/log/zap/flags.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/log/zap/flags.go @@ -37,8 +37,8 @@ func init() { zapFlagSet = pflag.NewFlagSet("zap", pflag.ExitOnError) zapFlagSet.BoolVar(&development, "zap-devel", false, "Enable zap development mode (changes defaults to console encoder, debug log level, and disables sampling)") zapFlagSet.Var(&encoderVal, "zap-encoder", "Zap log encoding ('json' or 'console')") - zapFlagSet.Var(&levelVal, "zap-level", "Zap log level (one of 'debug', 'info', 'error')") - zapFlagSet.Var(&sampleVal, "zap-sample", "Enable zap log sampling") + zapFlagSet.Var(&levelVal, "zap-level", "Zap log level (one of 'debug', 'info', 'error' or any integer value > 0)") + zapFlagSet.Var(&sampleVal, "zap-sample", "Enable zap log sampling. Sampling will be disabled for integer log levels > 1") } func FlagSet() *pflag.FlagSet { @@ -91,11 +91,28 @@ type levelValue struct { func (v *levelValue) Set(l string) error { v.set = true lower := strings.ToLower(l) + var lvl int switch lower { - case "debug", "info", "error": - return v.level.Set(l) + case "debug": + lvl = -1 + case "info": + lvl = 0 + case "error": + lvl = 2 + default: + i, err := strconv.Atoi(lower) + if err != nil { + return fmt.Errorf("invalid log level \"%s\"", l) + } + + if i > 0 { + lvl = -1 * i + } else { + return fmt.Errorf("invalid log level \"%s\"", l) + } } - return fmt.Errorf("invalid log level \"%s\"", l) + v.level = zapcore.Level(int8(lvl)) + return nil } func (v levelValue) String() string { diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/log/zap/logger.go b/vendor/github.com/operator-framework/operator-sdk/pkg/log/zap/logger.go index 316669544b..795cbff3f3 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/log/zap/logger.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/log/zap/logger.go @@ -32,7 +32,7 @@ func Logger() logr.Logger { func LoggerTo(destWriter io.Writer) logr.Logger { syncer := zapcore.AddSync(destWriter) - conf := getConfig(destWriter) + conf := getConfig() conf.encoder = &logf.KubeAwareEncoder{Encoder: conf.encoder, Verbose: conf.level.Level() < 0} if conf.sample { @@ -53,7 +53,7 @@ type config struct { opts []zap.Option } -func getConfig(destWriter io.Writer) config { +func getConfig() config { var c config // Set the defaults depending on the log mode (development vs. production) @@ -80,5 +80,10 @@ func getConfig(destWriter io.Writer) config { c.sample = sampleVal.sample } + // Disable sampling when we are in debug mode. 
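An aside on the new --zap-level handling above: the named levels map onto zapcore levels, a bare integer N greater than zero becomes zapcore.Level(-N) for extra verbosity, and the logger change that follows switches sampling off for anything more verbose than debug. A small sketch of the mapping, with values taken from the Set method above:

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	// Named levels as parsed by the --zap-level flag.
	named := map[string]zapcore.Level{
		"debug": zapcore.Level(-1),
		"info":  zapcore.Level(0),
		"error": zapcore.Level(2),
	}
	fmt.Println(named["debug"], named["info"], named["error"]) // debug info error

	// A raw integer such as --zap-level=3 is negated for extra verbosity.
	custom := zapcore.Level(-3)
	fmt.Println(custom, custom < -1) // Level(-3) true -> sampling gets disabled
}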
Otherwise, this will + // cause index out of bounds errors in the sampling code. + if c.level.Level() < -1 { + c.sample = false + } return c } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/metrics/metrics.go b/vendor/github.com/operator-framework/operator-sdk/pkg/metrics/metrics.go index 1ea736ef58..d213c262d0 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/metrics/metrics.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/metrics/metrics.go @@ -23,23 +23,32 @@ import ( v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/rest" crclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" ) var log = logf.Log.WithName("metrics") -// PrometheusPortName defines the port name used in kubernetes deployment and service resources -const PrometheusPortName = "metrics" +var trueVar = true + +const ( + // PrometheusPortName defines the port name used in the metrics Service. + PrometheusPortName = "metrics" +) // ExposeMetricsPort creates a Kubernetes Service to expose the passed metrics port. func ExposeMetricsPort(ctx context.Context, port int32) (*v1.Service, error) { + client, err := createClient() + if err != nil { + return nil, fmt.Errorf("failed to create new client: %v", err) + } // We do not need to check the validity of the port, as controller-runtime // would error out and we would never get to this stage. - s, err := initOperatorService(port, PrometheusPortName) + s, err := initOperatorService(ctx, client, port, PrometheusPortName) if err != nil { if err == k8sutil.ErrNoNamespace { log.Info("Skipping metrics Service creation; not running in a cluster.") @@ -47,7 +56,7 @@ func ExposeMetricsPort(ctx context.Context, port int32) (*v1.Service, error) { } return nil, fmt.Errorf("failed to initialize service object for metrics: %v", err) } - service, err := createService(ctx, s) + service, err := createOrUpdateService(ctx, client, s) if err != nil { return nil, fmt.Errorf("failed to create or get service for metrics: %v", err) } @@ -55,40 +64,37 @@ func ExposeMetricsPort(ctx context.Context, port int32) (*v1.Service, error) { return service, nil } -func createService(ctx context.Context, s *v1.Service) (*v1.Service, error) { - config, err := rest.InClusterConfig() - if err != nil { - return nil, err - } - - client, err := crclient.New(config, crclient.Options{}) - if err != nil { - return nil, err - } - +func createOrUpdateService(ctx context.Context, client crclient.Client, s *v1.Service) (*v1.Service, error) { if err := client.Create(ctx, s); err != nil { if !apierrors.IsAlreadyExists(err) { return nil, err } - // Get existing Service and return it + // Service already exists, we want to update it + // as we do not know if any fields might have changed. 
existingService := &v1.Service{} err := client.Get(ctx, types.NamespacedName{ Name: s.Name, Namespace: s.Namespace, }, existingService) + + s.ResourceVersion = existingService.ResourceVersion + if existingService.Spec.Type == v1.ServiceTypeClusterIP { + s.Spec.ClusterIP = existingService.Spec.ClusterIP + } + err = client.Update(ctx, s) if err != nil { return nil, err } - log.Info("Metrics Service object already exists", "name", existingService.Name) + log.V(1).Info("Metrics Service object updated", "Service.Name", s.Name, "Service.Namespace", s.Namespace) return existingService, nil } - log.Info("Metrics Service object created", "name", s.Name) + log.Info("Metrics Service object created", "Service.Name", s.Name, "Service.Namespace", s.Namespace) return s, nil } -// initOperatorService returns the static service which exposes specifed port. -func initOperatorService(port int32, portName string) (*v1.Service, error) { +// initOperatorService returns the static service which exposes specified port. +func initOperatorService(ctx context.Context, client crclient.Client, port int32, portName string) (*v1.Service, error) { operatorName, err := k8sutil.GetOperatorName() if err != nil { return nil, err @@ -97,11 +103,14 @@ func initOperatorService(port int32, portName string) (*v1.Service, error) { if err != nil { return nil, err } + + label := map[string]string{"name": operatorName} + service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: operatorName, Namespace: namespace, - Labels: map[string]string{"name": operatorName}, + Labels: label, }, TypeMeta: metav1.TypeMeta{ Kind: "Service", @@ -119,8 +128,72 @@ func initOperatorService(port int32, portName string) (*v1.Service, error) { Name: portName, }, }, - Selector: map[string]string{"name": operatorName}, + Selector: label, }, } + + ownRef, err := getPodOwnerRef(ctx, client, namespace) + if err != nil { + return nil, err + } + service.SetOwnerReferences([]metav1.OwnerReference{*ownRef}) + return service, nil } + +func getPodOwnerRef(ctx context.Context, client crclient.Client, ns string) (*metav1.OwnerReference, error) { + // Get current Pod the operator is running in + pod, err := k8sutil.GetPod(ctx, client, ns) + if err != nil { + return nil, err + } + podOwnerRefs := metav1.NewControllerRef(pod, pod.GroupVersionKind()) + // Get Owner that the Pod belongs to + ownerRef := metav1.GetControllerOf(pod) + finalOwnerRef, err := findFinalOwnerRef(ctx, client, ns, ownerRef) + if err != nil { + return nil, err + } + if finalOwnerRef != nil { + return finalOwnerRef, nil + } + + // Default to returning Pod as the Owner + return podOwnerRefs, nil +} + +// findFinalOwnerRef tries to locate the final controller/owner based on the owner reference provided. 
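To make the ownership walk implemented below concrete: for a typical operator the chain is Pod -> ReplicaSet -> Deployment, so the metrics Service ends up owned by the Deployment and is garbage-collected with it. A toy in-memory sketch of the same controller-reference walk (object names are hypothetical; the real code resolves each owner via the API server):

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "memcached-operator", UID: "dep-uid"}}
	rs := &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Name: "memcached-operator-5d4f9", UID: "rs-uid"}}
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "memcached-operator-5d4f9-abcde", UID: "pod-uid"}}

	// Wire up the usual controller references.
	rs.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(dep, appsv1.SchemeGroupVersion.WithKind("Deployment"))}
	pod.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(rs, appsv1.SchemeGroupVersion.WithKind("ReplicaSet"))}

	// Walk controller owners until one has no controller of its own,
	// mirroring what findFinalOwnerRef does against the cluster.
	owners := map[string]metav1.Object{"ReplicaSet": rs, "Deployment": dep}
	ref := metav1.GetControllerOf(pod)
	for ref != nil {
		next, ok := owners[ref.Kind]
		if !ok {
			break
		}
		parent := metav1.GetControllerOf(next)
		if parent == nil {
			break
		}
		ref = parent
	}
	fmt.Println(ref.Kind, ref.Name) // Deployment memcached-operator
}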
+func findFinalOwnerRef(ctx context.Context, client crclient.Client, ns string, ownerRef *metav1.OwnerReference) (*metav1.OwnerReference, error) { + if ownerRef == nil { + return nil, nil + } + + obj := &unstructured.Unstructured{} + obj.SetAPIVersion(ownerRef.APIVersion) + obj.SetKind(ownerRef.Kind) + err := client.Get(ctx, types.NamespacedName{Namespace: ns, Name: ownerRef.Name}, obj) + if err != nil { + return nil, err + } + newOwnerRef := metav1.GetControllerOf(obj) + if newOwnerRef != nil { + return findFinalOwnerRef(ctx, client, ns, newOwnerRef) + } + + log.V(1).Info("Pods owner found", "Kind", ownerRef.Kind, "Name", ownerRef.Name, "Namespace", ns) + return ownerRef, nil +} + +func createClient() (crclient.Client, error) { + config, err := config.GetConfig() + if err != nil { + return nil, err + } + + client, err := crclient.New(config, crclient.Options{}) + if err != nil { + return nil, err + } + + return client, nil +} diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/ready/ready.go b/vendor/github.com/operator-framework/operator-sdk/pkg/ready/ready.go index 6846ccce1f..da46d72afc 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/ready/ready.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/ready/ready.go @@ -42,7 +42,7 @@ func NewFileReady() Ready { type fileReady struct{} -// Set creates a file on disk whose presense can be used by a readiness probe +// Set creates a file on disk whose presence can be used by a readiness probe // to determine that the operator is ready. func (r fileReady) Set() error { f, err := os.Create(FileName) diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/addtoscheme.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/addtoscheme.go index 343f8b263e..e289b1d75f 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/addtoscheme.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/addtoscheme.go @@ -33,7 +33,7 @@ type AddToScheme struct { func (s *AddToScheme) GetInput() (input.Input, error) { if s.Path == "" { fileName := fmt.Sprintf("addtoscheme_%s_%s.go", - strings.ToLower(s.Resource.Group), + s.Resource.GoImportGroup, strings.ToLower(s.Resource.Version)) s.Path = filepath.Join(ApisDir, fileName) } @@ -44,7 +44,7 @@ func (s *AddToScheme) GetInput() (input.Input, error) { const addToSchemeTemplate = `package apis import ( - "{{ .Repo }}/pkg/apis/{{ .Resource.Group }}/{{ .Resource.Version }}" + "{{ .Repo }}/pkg/apis/{{ .Resource.GoImportGroup}}/{{ .Resource.Version }}" ) func init() { diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/ao_logs.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/ao_logs.go new file mode 100644 index 0000000000..49d4a2da17 --- /dev/null +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/ao_logs.go @@ -0,0 +1,50 @@ +// Copyright 2019 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ansible + +import ( + "path/filepath" + + "github.com/operator-framework/operator-sdk/pkg/scaffold/input" +) + +//DockerfileHybrid - Dockerfile for a hybrid operator +type AoLogs struct { + input.Input +} + +// GetInput - gets the input +func (a *AoLogs) GetInput() (input.Input, error) { + if a.Path == "" { + a.Path = filepath.Join("bin", "ao-logs") + } + a.TemplateBody = aoLogsTmpl + a.IsExec = true + return a.Input, nil +} + +const aoLogsTmpl = `#!/bin/bash + +watch_dir=${1:-/tmp/ansible-operator/runner} +filename=${2:-stdout} +mkdir -p ${watch_dir} +inotifywait -r -m -e close_write ${watch_dir} | while read dir op file +do + if [[ "${file}" = "${filename}" ]] ; then + echo "${dir}/${file}" + cat ${dir}/${file} + fi +done +` diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/deploy_operator.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/deploy_operator.go index cfe491d540..7094807f56 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/deploy_operator.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/deploy_operator.go @@ -54,10 +54,25 @@ spec: spec: serviceAccountName: {{.ProjectName}} containers: - - name: {{.ProjectName}} + - name: ansible + command: + - /usr/local/bin/ao-logs + - /tmp/ansible-operator/runner + - stdout # Replace this with the built image name image: "{{ "{{ REPLACE_IMAGE }}" }}" imagePullPolicy: "{{ "{{ pull_policy|default('Always') }}"}}" + volumeMounts: + - mountPath: /tmp/ansible-operator/runner + name: runner + readOnly: true + - name: operator + # Replace this with the built image name + image: "{{ "{{ REPLACE_IMAGE }}" }}" + imagePullPolicy: "{{ "{{ pull_policy|default('Always') }}"}}" + volumeMounts: + - mountPath: /tmp/ansible-operator/runner + name: runner env: - name: WATCH_NAMESPACE {{- if .IsClusterScoped }} @@ -73,4 +88,7 @@ spec: fieldPath: metadata.name - name: OPERATOR_NAME value: "{{.ProjectName}}" + volumes: + - name: runner + emptyDir: {} ` diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/dockerfilehybrid.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/dockerfilehybrid.go index ee75f6efd6..2bdffaead2 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/dockerfilehybrid.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/dockerfilehybrid.go @@ -47,6 +47,7 @@ func (d *DockerfileHybrid) GetInput() (input.Input, error) { const dockerFileHybridAnsibleTmpl = `FROM ansible/ansible-runner RUN yum remove -y ansible python-idna +RUN yum install -y inotify-tools && yum clean all RUN pip uninstall ansible-runner -y RUN pip install --upgrade setuptools diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/gopkgtoml.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/gopkgtoml.go index a5a6a9435f..8c2d04c105 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/gopkgtoml.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/gopkgtoml.go @@ -36,7 +36,7 @@ const gopkgTomlTmpl = `[[constraint]] name = "github.com/operator-framework/operator-sdk" # The version rule is used for a specific release and the master branch for in between releases. 
# branch = "master" #osdk_branch_annotation - version = "=v0.5.0" #osdk_version_annotation + version = "=v0.6.0" #osdk_version_annotation [[override]] name = "k8s.io/api" diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/k8s_status.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/k8s_status.go index 48b8c41bf5..3d39e6171a 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/k8s_status.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/k8s_status.go @@ -16,6 +16,8 @@ package ansible import ( "github.com/operator-framework/operator-sdk/pkg/scaffold/input" + + "github.com/spf13/afero" ) const K8sStatusPythonFile = "library/k8s_status.py" @@ -30,10 +32,11 @@ func (k *K8sStatus) GetInput() (input.Input, error) { if k.Path == "" { k.Path = K8sStatusPythonFile } - k.TemplateBody = k8sStatusTmpl return k.Input, nil } +func (s K8sStatus) SetFS(_ afero.Fs) {} + func (k K8sStatus) CustomRender() ([]byte, error) { return []byte(k8sStatusTmpl), nil } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/molecule_default_molecule.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/molecule_default_molecule.go index 1dc7ed3fc1..8ca9dd5355 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/molecule_default_molecule.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/molecule_default_molecule.go @@ -50,6 +50,7 @@ platforms: - k8s image: bsycorp/kind:latest-1.12 privileged: True + override_command: no exposed_ports: - 8443/tcp - 10080/tcp diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/molecule_test_cluster_playbook.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/molecule_test_cluster_playbook.go index 01cc4b9f26..530c70c13f 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/molecule_test_cluster_playbook.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/molecule_test_cluster_playbook.go @@ -58,7 +58,7 @@ const moleculeTestClusterPlaybookAnsibleTmpl = `--- debug: msg: "{{"{{"}} lookup('k8s', group='{{.Resource.FullGroup}}', api_version='{{.Resource.Version}}', kind='{{.Resource.Kind}}', namespace=namespace, resource_name=custom_resource.metadata.name) {{"}}"}}" - - name: Wait 40s for reconcilation to run + - name: Wait 40s for reconciliation to run k8s_facts: api_version: '{{.Resource.Version}}' kind: '{{.Resource.Kind }}' diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/molecule_test_local_molecule.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/molecule_test_local_molecule.go index 7f1d4d7e92..ac5c8a7cd0 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/molecule_test_local_molecule.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/molecule_test_local_molecule.go @@ -50,6 +50,7 @@ platforms: - k8s image: bsycorp/kind:latest-1.12 privileged: True + override_command: no exposed_ports: - 8443/tcp - 10080/tcp diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/molecule_test_local_playbook.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/molecule_test_local_playbook.go index cef65b076a..70da09ddf3 100644 --- 
a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/molecule_test_local_playbook.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/ansible/molecule_test_local_playbook.go @@ -100,7 +100,7 @@ const moleculeTestLocalPlaybookAnsibleTmpl = `--- namespace: '{{ "{{ namespace }}" }}' definition: "{{ "{{ custom_resource }}" }}" - - name: Wait 40s for reconcilation to run + - name: Wait 40s for reconciliation to run k8s_facts: api_version: '{{"{{"}} custom_resource.apiVersion {{"}}"}}' kind: '{{"{{"}} custom_resource.kind {{"}}"}}' diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/build_dockerfile.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/build_dockerfile.go index e1830d5960..37d3c46aae 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/build_dockerfile.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/build_dockerfile.go @@ -34,7 +34,7 @@ func (s *Dockerfile) GetInput() (input.Input, error) { return s.Input, nil } -const dockerfileTmpl = `FROM alpine:3.8 +const dockerfileTmpl = `FROM registry.access.redhat.com/ubi7-dev-preview/ubi-minimal:7.6 ENV OPERATOR=/usr/local/bin/{{.ProjectName}} \ USER_UID=1001 \ diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/controller_kind.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/controller_kind.go index a2476ccc49..c0e0eb3969 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/controller_kind.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/controller_kind.go @@ -44,7 +44,7 @@ const controllerKindTemplate = `package {{ .Resource.LowerKind }} import ( "context" - {{ .Resource.Group}}{{ .Resource.Version }} "{{ .Repo }}/pkg/apis/{{ .Resource.Group}}/{{ .Resource.Version }}" + {{ .Resource.GoImportGroup}}{{ .Resource.Version }} "{{ .Repo }}/pkg/apis/{{ .Resource.GoImportGroup}}/{{ .Resource.Version }}" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -88,7 +88,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { } // Watch for changes to primary resource {{ .Resource.Kind }} - err = c.Watch(&source.Kind{Type: &{{ .Resource.Group}}{{ .Resource.Version }}.{{ .Resource.Kind }}{}}, &handler.EnqueueRequestForObject{}) + err = c.Watch(&source.Kind{Type: &{{ .Resource.GoImportGroup}}{{ .Resource.Version }}.{{ .Resource.Kind }}{}}, &handler.EnqueueRequestForObject{}) if err != nil { return err } @@ -97,7 +97,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { // Watch for changes to secondary resource Pods and requeue the owner {{ .Resource.Kind }} err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{ IsController: true, - OwnerType: &{{ .Resource.Group}}{{ .Resource.Version }}.{{ .Resource.Kind }}{}, + OwnerType: &{{ .Resource.GoImportGroup}}{{ .Resource.Version }}.{{ .Resource.Kind }}{}, }) if err != nil { return err @@ -128,7 +128,7 @@ func (r *Reconcile{{ .Resource.Kind }}) Reconcile(request reconcile.Request) (re reqLogger.Info("Reconciling {{ .Resource.Kind }}") // Fetch the {{ .Resource.Kind }} instance - instance := &{{ .Resource.Group}}{{ .Resource.Version }}.{{ .Resource.Kind }}{} + instance := &{{ .Resource.GoImportGroup}}{{ .Resource.Version }}.{{ .Resource.Kind }}{} err := r.client.Get(context.TODO(), request.NamespacedName, instance) if err != nil { if errors.IsNotFound(err) { @@ -171,7 +171,7 @@ func (r *Reconcile{{ .Resource.Kind }}) 
Reconcile(request reconcile.Request) (re } // newPodForCR returns a busybox pod with the same name/namespace as the cr -func newPodForCR(cr *{{ .Resource.Group}}{{ .Resource.Version }}.{{ .Resource.Kind }}) *corev1.Pod { +func newPodForCR(cr *{{ .Resource.GoImportGroup}}{{ .Resource.Version }}.{{ .Resource.Kind }}) *corev1.Pod { labels := map[string]string{ "app": cr.Name, } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/crd.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/crd.go index 4bb95477e2..d46ca77871 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/crd.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/crd.go @@ -22,13 +22,13 @@ import ( "strings" "sync" + "github.com/operator-framework/operator-sdk/internal/util/k8sutil" "github.com/operator-framework/operator-sdk/pkg/scaffold/input" "github.com/ghodss/yaml" "github.com/spf13/afero" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" crdgenerator "sigs.k8s.io/controller-tools/pkg/crd/generator" ) @@ -76,6 +76,8 @@ func initCache() { }) } +func (s *CRD) SetFS(_ afero.Fs) {} + func (s *CRD) CustomRender() ([]byte, error) { i, _ := s.GetInput() // controller-tools generates crd file names with no _crd.yaml suffix: @@ -140,7 +142,7 @@ func (s *CRD) CustomRender() ([]byte, error) { } addCRDSubresource(dstCRD) addCRDVersions(dstCRD) - return getCRDBytes(dstCRD) + return k8sutil.GetObjectBytes(dstCRD) } func newCRDForResource(r *Resource) *apiextv1beta1.CustomResourceDefinition { @@ -214,14 +216,3 @@ func addCRDVersions(crd *apiextv1beta1.CustomResourceDefinition) { crd.Spec.Versions = crdVersions } } - -func getCRDBytes(crd *apiextv1beta1.CustomResourceDefinition) ([]byte, error) { - // Remove the "status" field from yaml data, which causes a - // resource creation error. - crdMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(crd) - if err != nil { - return nil, err - } - delete(crdMap, "status") - return yaml.Marshal(&crdMap) -} diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/customrender.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/customrender.go index f3ff39c57a..5d3bfc65ca 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/customrender.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/customrender.go @@ -14,8 +14,16 @@ package scaffold +import "github.com/spf13/afero" + // CustomRenderer is the interface for writing any scaffold file that does // not use a template. type CustomRenderer interface { + // SetFS sets the fs in the CustomRenderer's underlying type if it exists. + // SetFS is used to inject the callers' fs into a CustomRenderer, which may + // want to write/read from the same fs. + SetFS(afero.Fs) + // CustomRender performs arbitrary rendering of file data and returns + // bytes to write to a file. 
CustomRender() ([]byte, error) } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/doc.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/doc.go index 86bd812d7d..99ed2f37fc 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/doc.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/doc.go @@ -34,7 +34,7 @@ type Doc struct { func (s *Doc) GetInput() (input.Input, error) { if s.Path == "" { s.Path = filepath.Join(ApisDir, - strings.ToLower(s.Resource.Group), + s.Resource.GoImportGroup, strings.ToLower(s.Resource.Version), DocFile) } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/gopkgtoml.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/gopkgtoml.go index fb100bb767..6a98988170 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/gopkgtoml.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/gopkgtoml.go @@ -103,7 +103,7 @@ required = [ name = "github.com/operator-framework/operator-sdk" # The version rule is used for a specific release and the master branch for in between releases. # branch = "master" #osdk_branch_annotation - version = "=v0.5.0" #osdk_version_annotation + version = "=v0.6.0" #osdk_version_annotation [prune] go-tests = true diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/chart.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/chart.go index 68c8c2d314..04de74b723 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/chart.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/chart.go @@ -15,40 +15,253 @@ package helm import ( + "fmt" + "io/ioutil" "os" "path/filepath" + "strings" "github.com/operator-framework/operator-sdk/pkg/scaffold" + "github.com/iancoleman/strcase" log "github.com/sirupsen/logrus" "k8s.io/helm/pkg/chartutil" + "k8s.io/helm/pkg/downloader" + "k8s.io/helm/pkg/getter" + "k8s.io/helm/pkg/helm/environment" + "k8s.io/helm/pkg/helm/helmpath" "k8s.io/helm/pkg/proto/hapi/chart" + "k8s.io/helm/pkg/repo" ) -// HelmChartsDir is the relative directory within an SDK project where Helm -// charts are stored. -const HelmChartsDir string = "helm-charts" +const ( -// CreateChartForResource creates a new helm chart in the SDK project for the -// provided resource. -func CreateChartForResource(r *scaffold.Resource, projectDir string) (*chart.Chart, error) { - log.Infof("Create %s/%s/", HelmChartsDir, r.LowerKind) + // HelmChartsDir is the relative directory within an SDK project where Helm + // charts are stored. + HelmChartsDir string = "helm-charts" + + // DefaultAPIVersion is the Kubernetes CRD API Version used for fetched + // charts when the --api-version flag is not specified + DefaultAPIVersion string = "charts.helm.k8s.io/v1alpha1" +) + +// CreateChartOptions is used to configure how a Helm chart is scaffolded +// for a new Helm operator project. +type CreateChartOptions struct { + // ResourceAPIVersion defines the Kubernetes GroupVersion to be associated + // with the created chart. + ResourceAPIVersion string + + // ResourceKind defines the Kubernetes Kind to be associated with the + // created chart. + ResourceKind string + + // Chart is a chart reference for a local or remote chart. + Chart string + + // Repo is a URL to a custom chart repository. + Repo string + + // Version is the version of the chart to fetch. 
+	Version string +} + +// CreateChart scaffolds a new helm chart for the project rooted in projectDir +// based on the passed opts. +// +// It returns a scaffold.Resource that can be used by the caller to create +// other related files. opts.ResourceAPIVersion and opts.ResourceKind are +// used to create the resource and must be specified if opts.Chart is empty. +// +// If opts.Chart is not empty, opts.ResourceAPIVersion and opts.ResourceKind can be +// left unset: opts.ResourceAPIVersion defaults to "charts.helm.k8s.io/v1alpha1" +// and opts.ResourceKind is deduced from the specified opts.Chart. +// +// CreateChart also returns a chart.Chart that references the newly created +// chart. +// +// If opts.Chart is empty, CreateChart scaffolds the default chart from helm's +// default template. +// +// If opts.Chart is a local file, CreateChart verifies that it is a valid helm +// chart archive and unpacks it into the project's helm charts directory. +// +// If opts.Chart is a local directory, CreateChart verifies that it is a valid +// helm chart directory and copies it into the project's helm charts directory. +// +// For any other value of opts.Chart, CreateChart attempts to fetch the helm chart +// from a remote repository. +// +// If opts.Repo is not specified, the following chart reference formats are supported: +// +// - <repoName>/<chartName>: Fetch the helm chart named chartName from the helm +// chart repository named repoName, as specified in the +// $HELM_HOME/repositories/repositories.yaml file. +// +// - <url>: Fetch the helm chart archive at the specified URL. +// +// If opts.Repo is specified, only one chart reference format is supported: +// +// - <chartName>: Fetch the helm chart named chartName in the helm chart repository +// specified by opts.Repo +// +// If opts.Version is not set, CreateChart will fetch the latest available version of +// the helm chart. Otherwise, CreateChart will fetch the specified version. +// opts.Version is not used when opts.Chart itself refers to a specific version, for +// example when it is a local path or a URL. +// +// CreateChart returns an error if an error occurs creating the scaffold.Resource or +// creating the chart. +func CreateChart(projectDir string, opts CreateChartOptions) (*scaffold.Resource, *chart.Chart, error) { + chartsDir := filepath.Join(projectDir, HelmChartsDir) + err := os.MkdirAll(chartsDir, 0755) + if err != nil { + return nil, nil, err + } + + var ( + r *scaffold.Resource + c *chart.Chart + ) + + // If we don't have a helm chart reference, scaffold the default chart + // from Helm's default template. Otherwise, fetch it. + if len(opts.Chart) == 0 { + r, c, err = scaffoldChart(chartsDir, opts.ResourceAPIVersion, opts.ResourceKind) + } else { + r, c, err = fetchChart(chartsDir, opts) + } + if err != nil { + return nil, nil, err + } + log.Infof("Created %s/%s/", HelmChartsDir, c.GetMetadata().GetName()) + return r, c, nil +} + +func scaffoldChart(destDir, apiVersion, kind string) (*scaffold.Resource, *chart.Chart, error) { + r, err := scaffold.NewResource(apiVersion, kind) + if err != nil { + return nil, nil, err + } chartfile := &chart.Metadata{ + // Many helm charts use hyphenated names, but we chose not to because + // of the issues related to how hyphens are interpreted in templates.
+ // See https://github.com/helm/helm/issues/2192 Name: r.LowerKind, Description: "A Helm chart for Kubernetes", Version: "0.1.0", AppVersion: "1.0", ApiVersion: chartutil.ApiVersionV1, } + chartPath, err := chartutil.Create(chartfile, destDir) + if err != nil { + return nil, nil, err + } - chartsDir := filepath.Join(projectDir, HelmChartsDir) - if err := os.MkdirAll(chartsDir, 0755); err != nil { + chart, err := chartutil.LoadDir(chartPath) + if err != nil { + return nil, nil, err + } + return r, chart, nil +} + +func fetchChart(destDir string, opts CreateChartOptions) (*scaffold.Resource, *chart.Chart, error) { + var ( + stat os.FileInfo + chart *chart.Chart + err error + ) + + if stat, err = os.Stat(opts.Chart); err == nil { + chart, err = createChartFromDisk(destDir, opts.Chart, stat.IsDir()) + } else { + chart, err = createChartFromRemote(destDir, opts) + } + if err != nil { + return nil, nil, err + } + + chartName := chart.GetMetadata().GetName() + if len(opts.ResourceAPIVersion) == 0 { + opts.ResourceAPIVersion = DefaultAPIVersion + } + if len(opts.ResourceKind) == 0 { + opts.ResourceKind = strcase.ToCamel(chartName) + } + + r, err := scaffold.NewResource(opts.ResourceAPIVersion, opts.ResourceKind) + if err != nil { + return nil, nil, err + } + return r, chart, nil +} + +func createChartFromDisk(destDir, source string, isDir bool) (*chart.Chart, error) { + var ( + chart *chart.Chart + err error + ) + + // If source is a file or directory, attempt to load it + if isDir { + chart, err = chartutil.LoadDir(source) + } else { + chart, err = chartutil.LoadFile(source) + } + if err != nil { + return nil, err + } + + // Save it into our project's helm-charts directory. + if err := chartutil.SaveDir(chart, destDir); err != nil { + return nil, err + } + return chart, nil +} + +func createChartFromRemote(destDir string, opts CreateChartOptions) (*chart.Chart, error) { + helmHome, ok := os.LookupEnv(environment.HomeEnvVar) + if !ok { + helmHome = environment.DefaultHelmHome + } + getters := getter.All(environment.EnvSettings{}) + c := downloader.ChartDownloader{ + HelmHome: helmpath.Home(helmHome), + Out: os.Stderr, + Getters: getters, + } + + if opts.Repo != "" { + chartURL, err := repo.FindChartInRepoURL(opts.Repo, opts.Chart, opts.Version, "", "", "", getters) + if err != nil { + return nil, err + } + opts.Chart = chartURL + } + + tmpDir, err := ioutil.TempDir("", "osdk-helm-chart") + if err != nil { return nil, err } - chartDir, err := chartutil.Create(chartfile, chartsDir) + defer func() { + if err := os.RemoveAll(tmpDir); err != nil { + log.Errorf("Failed to remove temporary directory %s: %s", tmpDir, err) + } + }() + + chartArchive, _, err := c.DownloadTo(opts.Chart, opts.Version, tmpDir) if err != nil { + // One of Helm's error messages directs users to run `helm init`, which + // installs tiller in a remote cluster. Since that's unnecessary and + // unhelpful, modify the error message to be relevant for operator-sdk. 
+ if strings.Contains(err.Error(), "Couldn't load repositories file") { + return nil, fmt.Errorf("failed to load repositories file %s "+ + "(you might need to run `helm init --client-only` "+ + "to create and initialize it)", c.HelmHome.RepositoryFile()) + } return nil, err } - return chartutil.LoadDir(chartDir) + + return createChartFromDisk(destDir, chartArchive, false) } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/dockerfilehybrid.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/dockerfilehybrid.go index 4954cac442..f7d9944bfe 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/dockerfilehybrid.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/dockerfilehybrid.go @@ -41,7 +41,7 @@ func (d *DockerfileHybrid) GetInput() (input.Input, error) { return d.Input, nil } -const dockerFileHybridHelmTmpl = `FROM alpine:3.6 +const dockerFileHybridHelmTmpl = `FROM registry.access.redhat.com/ubi7-dev-preview/ubi-minimal:7.6 ENV OPERATOR=/usr/local/bin/helm-operator \ USER_UID=1001 \ diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/gopkgtoml.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/gopkgtoml.go index 3881ee2252..49daa8c513 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/gopkgtoml.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/gopkgtoml.go @@ -36,11 +36,7 @@ const gopkgTomlTmpl = `[[constraint]] name = "github.com/operator-framework/operator-sdk" # The version rule is used for a specific release and the master branch for in between releases. # branch = "master" #osdk_branch_annotation - version = "=v0.5.0" #osdk_version_annotation - -[[override]] - name = "k8s.io/kubernetes" - version = "=1.12.3" + version = "=v0.6.0" #osdk_version_annotation [[override]] name = "k8s.io/api" @@ -68,7 +64,11 @@ const gopkgTomlTmpl = `[[constraint]] # We need overrides for the following imports because dep can't resolve them # correctly. The easiest way to get this right is to use the versions that -# k8s.io/helm uses. See https://github.com/helm/helm/blob/v2.12.0-rc.1/glide.lock +# k8s.io/helm uses. 
See https://github.com/helm/helm/blob/v2.13.0/glide.lock +[[override]] + name = "k8s.io/kubernetes" + revision = "c6d339953bd4fd8c021a6b5fb46d7952b30be9f9" + [[override]] name = "github.com/russross/blackfriday" revision = "300106c228d52c8941d4b3de6054a6062a86dda3" diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/operator.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/operator.go index 8e19eacb23..a33c12ee3a 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/operator.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/operator.go @@ -56,9 +56,6 @@ spec: - name: {{.ProjectName}} # Replace this with the built image name image: REPLACE_IMAGE - ports: - - containerPort: 60000 - name: metrics imagePullPolicy: Always env: - name: WATCH_NAMESPACE diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/watches.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/watches.go index 99ca1ba55f..e8e6db626a 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/watches.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/helm/watches.go @@ -27,6 +27,7 @@ type WatchesYAML struct { Resource *scaffold.Resource HelmChartsDir string + ChartName string } // GetInput gets the scaffold execution input @@ -36,6 +37,9 @@ func (s *WatchesYAML) GetInput() (input.Input, error) { } s.HelmChartsDir = HelmChartsDir s.TemplateBody = watchesYAMLTmpl + if s.ChartName == "" { + s.ChartName = s.Resource.LowerKind + } return s.Input, nil } @@ -43,5 +47,5 @@ const watchesYAMLTmpl = `--- - version: {{.Resource.Version}} group: {{.Resource.FullGroup}} kind: {{.Resource.Kind}} - chart: /opt/helm/{{.HelmChartsDir}}/{{.Resource.LowerKind}} + chart: /opt/helm/{{.HelmChartsDir}}/{{.ChartName}} ` diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/olm-catalog/concat_crd.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/olm-catalog/concat_crd.go deleted file mode 100644 index 50c8178adb..0000000000 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/olm-catalog/concat_crd.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2018 The Operator-SDK Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package catalog - -import ( - "io/ioutil" - "path/filepath" - - "github.com/operator-framework/operator-sdk/internal/util/yamlutil" - "github.com/operator-framework/operator-sdk/pkg/scaffold" - "github.com/operator-framework/operator-sdk/pkg/scaffold/input" -) - -const ConcatCRDYamlFile = "_generated.concat_crd.yaml" - -// ConcatCRD scaffolds a file of all concatenated CRD's found using config file -// fields. This file is used by the OLM to create CR's in conjunction with the -// operators' CSV. -type ConcatCRD struct { - input.Input - - // ConfigFilePath is the location of a configuration file path for this - // projects' CSV file. 
- ConfigFilePath string -} - -func (s *ConcatCRD) GetInput() (input.Input, error) { - if s.Path == "" { - s.Path = filepath.Join(scaffold.OLMCatalogDir, ConcatCRDYamlFile) - } - if s.ConfigFilePath == "" { - s.ConfigFilePath = filepath.Join(scaffold.OLMCatalogDir, CSVConfigYamlFile) - } - return s.Input, nil -} - -// CustomRender returns the bytes of all CRD manifests concatenated into one file. -func (s *ConcatCRD) CustomRender() ([]byte, error) { - cfg, err := getCSVConfig(s.ConfigFilePath) - if err != nil { - return nil, err - } - return concatCRDsInPaths(cfg.CRDCRPaths) -} - -// concatCRDsInPaths concatenates CRD manifests found at crdPaths into one -// file, delimited by `---`. -func concatCRDsInPaths(crdPaths []string) (cb []byte, err error) { - for _, f := range crdPaths { - yamlData, err := ioutil.ReadFile(f) - if err != nil { - return nil, err - } - - scanner := yamlutil.NewYAMLScanner(yamlData) - for scanner.Scan() { - yamlSpec := scanner.Bytes() - k, err := getKindfromYAML(yamlSpec) - if err != nil { - return nil, err - } - if k == "CustomResourceDefinition" { - cb = yamlutil.CombineManifests(cb, yamlSpec) - } - } - } - - return cb, nil -} diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/olm-catalog/config.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/olm-catalog/config.go index a8318f62e4..d0dbdd9f7c 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/olm-catalog/config.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/olm-catalog/config.go @@ -37,7 +37,7 @@ type CSVConfig struct { } // TODO: discuss case of no config file at default path: write new file or not. -func getCSVConfig(cfgFile string) (*CSVConfig, error) { +func GetCSVConfig(cfgFile string) (*CSVConfig, error) { cfg := &CSVConfig{} if _, err := os.Stat(cfgFile); err == nil { cfgData, err := ioutil.ReadFile(cfgFile) diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/olm-catalog/csv.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/olm-catalog/csv.go index 85c9e5bcbc..5436e152b8 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/olm-catalog/csv.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/olm-catalog/csv.go @@ -24,20 +24,20 @@ import ( "sync" "unicode" + "github.com/operator-framework/operator-sdk/internal/util/k8sutil" "github.com/operator-framework/operator-sdk/internal/util/yamlutil" "github.com/operator-framework/operator-sdk/pkg/scaffold" "github.com/operator-framework/operator-sdk/pkg/scaffold/input" - "github.com/spf13/afero" - "k8s.io/apimachinery/pkg/runtime" "github.com/coreos/go-semver/semver" "github.com/ghodss/yaml" olmapiv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1" log "github.com/sirupsen/logrus" + "github.com/spf13/afero" ) const ( - CSVYamlFileExt = ".csv.yaml" + CSVYamlFileExt = ".clusterserviceversion.yaml" CSVConfigYamlFile = "csv-config.yaml" ) @@ -51,9 +51,14 @@ type CSV struct { ConfigFilePath string // CSVVersion is the CSV current version. CSVVersion string - - once sync.Once - fs afero.Fs // For testing, ex. afero.NewMemMapFs() + // FromVersion is the CSV version from which to build a new CSV. A CSV + // manifest with this version should exist at: + // deploy/olm-catalog/{from_version}/operator-name.v{from_version}.{CSVYamlFileExt} + FromVersion string + + once sync.Once + fs afero.Fs // For testing, ex. 
afero.NewMemMapFs() + pathPrefix string // For testing, ex. testdata/deploy/olm-catalog } func (s *CSV) initFS(fs afero.Fs) { @@ -73,15 +78,24 @@ func (s *CSV) GetInput() (input.Input, error) { return input.Input{}, ErrNoCSVVersion } if s.Path == "" { - name := strings.ToLower(s.ProjectName) + CSVYamlFileExt - s.Path = filepath.Join(scaffold.OLMCatalogDir, name) + lowerProjName := strings.ToLower(s.ProjectName) + // Path is what the operator-registry expects: + // {manifests -> olm-catalog}/{operator_name}/{semver}/{operator_name}.v{semver}.clusterserviceversion.yaml + s.Path = filepath.Join(s.pathPrefix, + scaffold.OLMCatalogDir, + lowerProjName, + s.CSVVersion, + getCSVFileName(lowerProjName, s.CSVVersion), + ) } if s.ConfigFilePath == "" { - s.ConfigFilePath = filepath.Join(scaffold.OLMCatalogDir, CSVConfigYamlFile) + s.ConfigFilePath = filepath.Join(s.pathPrefix, scaffold.OLMCatalogDir, CSVConfigYamlFile) } return s.Input, nil } +func (s *CSV) SetFS(fs afero.Fs) { s.initFS(fs) } + // CustomRender allows a CSV to be written by marshalling // olmapiv1alpha1.ClusterServiceVersion instead of writing to a template. func (s *CSV) CustomRender() ([]byte, error) { @@ -97,11 +111,12 @@ func (s *CSV) CustomRender() ([]byte, error) { s.initCSVFields(csv) } - cfg, err := getCSVConfig(s.ConfigFilePath) + cfg, err := GetCSVConfig(s.ConfigFilePath) if err != nil { return nil, err } + setCSVDefaultFields(csv) if err = s.updateCSVVersions(csv); err != nil { return nil, err } @@ -119,20 +134,22 @@ func (s *CSV) CustomRender() ([]byte, error) { } } - // Remove the status field from the CSV, as status is managed at runtime. - cu, err := runtime.DefaultUnstructuredConverter.ToUnstructured(csv) - if err != nil { - return nil, err - } - delete(cu, "status") - return yaml.Marshal(&cu) + return k8sutil.GetObjectBytes(csv) } func (s *CSV) getBaseCSVIfExists() (*olmapiv1alpha1.ClusterServiceVersion, bool, error) { - lowerProjName := strings.ToLower(s.ProjectName) - name := lowerProjName + CSVYamlFileExt - fromCSV := filepath.Join(scaffold.OLMCatalogDir, name) - return getCSVFromFSIfExists(s.getFS(), fromCSV) + verToGet := s.CSVVersion + if s.FromVersion != "" { + verToGet = s.FromVersion + } + csv, exists, err := getCSVFromFSIfExists(s.getFS(), s.getCSVPath(verToGet)) + if err != nil { + return nil, false, err + } + if !exists && s.FromVersion != "" { + log.Warnf("FromVersion set (%s) but CSV does not exist", s.FromVersion) + } + return csv, exists, nil } func getCSVFromFSIfExists(fs afero.Fs, path string) (*olmapiv1alpha1.ClusterServiceVersion, bool, error) { @@ -159,6 +176,16 @@ func getCSVName(name, version string) string { return name + ".v" + version } +func getCSVFileName(name, version string) string { + return getCSVName(name, version) + CSVYamlFileExt +} + +func (s *CSV) getCSVPath(ver string) string { + lowerProjName := strings.ToLower(s.ProjectName) + name := getCSVFileName(lowerProjName, ver) + return filepath.Join(s.pathPrefix, scaffold.OLMCatalogDir, lowerProjName, ver, name) +} + // getDisplayName turns a project dir name in any of {snake, chain, camel} // cases, hierarchical dot structure, or space-delimited into a // space-delimited, title'd display name. 
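As an aside on the CSV path change above: the following is a minimal, illustrative Go sketch of the on-disk layout that the new getCSVFileName/getCSVPath helpers produce. It is not the vendored code itself, and the project name "memcached-operator" and version "0.0.2" are made-up example values.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// csvPath is an illustrative reimplementation of the v0.6.0 layout:
// deploy/olm-catalog/{operator_name}/{version}/{operator_name}.v{version}.clusterserviceversion.yaml
func csvPath(projectName, version string) string {
	name := strings.ToLower(projectName)
	fileName := name + ".v" + version + ".clusterserviceversion.yaml"
	return filepath.Join("deploy", "olm-catalog", name, version, fileName)
}

func main() {
	// Prints: deploy/olm-catalog/memcached-operator/0.0.2/memcached-operator.v0.0.2.clusterserviceversion.yaml
	fmt.Println(csvPath("memcached-operator", "0.0.2"))
}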
@@ -202,6 +229,7 @@ func (s *CSV) initCSVFields(csv *olmapiv1alpha1.ClusterServiceVersion) { csv.TypeMeta.Kind = olmapiv1alpha1.ClusterServiceVersionKind csv.SetName(getCSVName(strings.ToLower(s.ProjectName), s.CSVVersion)) csv.SetNamespace("placeholder") + csv.SetAnnotations(map[string]string{"capabilities": "Basic Install"}) // Spec fields csv.Spec.Version = *semver.New(s.CSVVersion) @@ -211,7 +239,19 @@ func (s *CSV) initCSVFields(csv *olmapiv1alpha1.ClusterServiceVersion) { csv.Spec.Provider = olmapiv1alpha1.AppLink{} csv.Spec.Maintainers = make([]olmapiv1alpha1.Maintainer, 0) csv.Spec.Links = make([]olmapiv1alpha1.AppLink, 0) - csv.SetLabels(make(map[string]string)) +} + +// setCSVDefaultFields sets default fields on older CSV versions or newly +// initialized CSV's. +func setCSVDefaultFields(csv *olmapiv1alpha1.ClusterServiceVersion) { + if len(csv.Spec.InstallModes) == 0 { + csv.Spec.InstallModes = []olmapiv1alpha1.InstallMode{ + {Type: olmapiv1alpha1.InstallModeTypeOwnNamespace, Supported: true}, + {Type: olmapiv1alpha1.InstallModeTypeSingleNamespace, Supported: true}, + {Type: olmapiv1alpha1.InstallModeTypeMultiNamespace, Supported: false}, + {Type: olmapiv1alpha1.InstallModeTypeAllNamespaces, Supported: true}, + } + } } // TODO: validate that all fields from files are populated as expected @@ -247,8 +287,8 @@ func getEmptyRequiredCSVFields(csv *olmapiv1alpha1.ClusterServiceVersion) (field if csv.Spec.Provider == (olmapiv1alpha1.AppLink{}) { fields = append(fields, "spec.provider") } - if len(csv.Spec.Labels) == 0 { - fields = append(fields, "spec.labels") + if csv.Spec.Maturity == "" { + fields = append(fields, "spec.maturity") } return fields diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/olm-catalog/csv_updaters.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/olm-catalog/csv_updaters.go index 78dc5d7161..5be5da135c 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/olm-catalog/csv_updaters.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/olm-catalog/csv_updaters.go @@ -84,7 +84,7 @@ func (s *updaterStore) AddToUpdater(yamlSpec []byte) error { case "Deployment": return s.AddDeploymentSpec(yamlSpec) case "CustomResourceDefinition": - // TODO: determine whether 'owned' or 'required' + // All CRD's present will be 'owned'. return s.AddOwnedCRD(yamlSpec) } return nil @@ -193,59 +193,34 @@ type CSVCustomResourceDefinitionsUpdate struct { } func (store *updaterStore) AddOwnedCRD(yamlDoc []byte) error { - crdDesc, err := parseCRDDescriptionFromYAML(yamlDoc) - if err == nil { - store.crdUpdate.Owned = append(store.crdUpdate.Owned, *crdDesc) - } - return err -} - -func (store *updaterStore) AddRequiredCRD(yamlDoc []byte) error { - crdDesc, err := parseCRDDescriptionFromYAML(yamlDoc) - if err == nil { - store.crdUpdate.Required = append(store.crdUpdate.Required, *crdDesc) - } - return err -} - -func parseCRDDescriptionFromYAML(yamlDoc []byte) (*olmapiv1alpha1.CRDDescription, error) { crd := &apiextv1beta1.CustomResourceDefinition{} if err := yaml.Unmarshal(yamlDoc, crd); err != nil { - return nil, err + return err } - return &olmapiv1alpha1.CRDDescription{ + store.crdUpdate.Owned = append(store.crdUpdate.Owned, olmapiv1alpha1.CRDDescription{ Name: crd.ObjectMeta.Name, Version: crd.Spec.Version, Kind: crd.Spec.Names.Kind, - }, nil + }) + return nil } -// Apply updates all CRDDescriptions with any user-defined data in csv's -// CRDDescriptions. +// Apply updates csv's "owned" CRDDescriptions. 
"required" CRDDescriptions are +// left as-is, since they are user-defined values. func (u *CSVCustomResourceDefinitionsUpdate) Apply(csv *olmapiv1alpha1.ClusterServiceVersion) error { - set := make(map[string]*olmapiv1alpha1.CRDDescription) - for _, csvDesc := range csv.GetAllCRDDescriptions() { - set[csvDesc.Name] = &csvDesc + set := make(map[string]olmapiv1alpha1.CRDDescription) + for _, csvDesc := range csv.Spec.CustomResourceDefinitions.Owned { + set[csvDesc.Name] = csvDesc } du := u.DeepCopy() - for i, uDesc := range du.Owned { - if csvDesc, ok := set[uDesc.Name]; ok { - d := csvDesc.DeepCopy() - d.Name = uDesc.Name - d.Version = uDesc.Version - d.Kind = uDesc.Kind - du.Owned[i] = *d - } - } - for i, uDesc := range du.Required { + for i, uDesc := range u.Owned { if csvDesc, ok := set[uDesc.Name]; ok { - d := csvDesc.DeepCopy() - d.Name = uDesc.Name - d.Version = uDesc.Version - d.Kind = uDesc.Kind - du.Required[i] = *d + csvDesc.Name = uDesc.Name + csvDesc.Version = uDesc.Version + csvDesc.Kind = uDesc.Kind + du.Owned[i] = csvDesc } } - csv.Spec.CustomResourceDefinitions = *du + csv.Spec.CustomResourceDefinitions.Owned = du.Owned return nil } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/register.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/register.go index 7c8a0329a9..2ae1e1de9e 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/register.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/register.go @@ -34,7 +34,7 @@ type Register struct { func (s *Register) GetInput() (input.Input, error) { if s.Path == "" { s.Path = filepath.Join(ApisDir, - strings.ToLower(s.Resource.Group), + s.Resource.GoImportGroup, strings.ToLower(s.Resource.Version), RegisterFile) } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/resource.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/resource.go index 497b206096..6039b39bd9 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/resource.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/resource.go @@ -51,6 +51,9 @@ type Resource struct { // Parsed from APIVersion Group string + // GoImportGroup is the non-hyphenated go import group for this resource + GoImportGroup string + // Version is the API version - e.g. 
v1alpha1 // Parsed from APIVersion Version string @@ -127,6 +130,9 @@ func (r *Resource) checkAndSetGroups() error { r.FullGroup = fg[0] r.Group = g[0] + s := strings.ToLower(r.Group) + r.GoImportGroup = strings.Replace(s, "-", "", -1) + if err := validation.IsDNS1123Subdomain(r.Group); err != nil { return fmt.Errorf("group name is invalid: %v", err) } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/role.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/role.go index 9cbb961b7e..075f09d47a 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/role.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/role.go @@ -191,4 +191,12 @@ rules: verbs: - "get" - "create" +- apiGroups: + - apps + resources: + - deployments/finalizers + resourceNames: + - {{ .ProjectName }} + verbs: + - "update" ` diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/scaffold.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/scaffold.go index 75499e1592..6235cf1f6a 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/scaffold.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/scaffold.go @@ -29,6 +29,7 @@ import ( "github.com/operator-framework/operator-sdk/pkg/scaffold/input" log "github.com/sirupsen/logrus" + "github.com/spf13/afero" "golang.org/x/tools/imports" ) @@ -36,13 +37,13 @@ import ( type Scaffold struct { // Repo is the go project package Repo string - // AbsProjectPath is the absolute path to the project root, including the project directory. AbsProjectPath string - // ProjectName is the operator's name, ex. app-operator ProjectName string - + // Fs is the filesystem GetWriter uses to write scaffold files. + Fs afero.Fs + // GetWriter returns a writer for writing scaffold files. GetWriter func(path string, mode os.FileMode) (io.Writer, error) } @@ -74,8 +75,11 @@ func (s *Scaffold) configure(cfg *input.Config) { // Execute executes scaffolding the Files func (s *Scaffold) Execute(cfg *input.Config, files ...input.File) error { + if s.Fs == nil { + s.Fs = afero.NewOsFs() + } if s.GetWriter == nil { - s.GetWriter = fileutil.NewFileWriter().WriteCloser + s.GetWriter = fileutil.NewFileWriterFS(s.Fs).WriteCloser } // Configure s using common fields from cfg. @@ -107,7 +111,7 @@ func (s *Scaffold) doFile(e input.File) error { absFilePath := filepath.Join(s.AbsProjectPath, i.Path) // Check if the file to write already exists - if _, err := os.Stat(absFilePath); err == nil || os.IsExist(err) { + if _, err := s.Fs.Stat(absFilePath); err == nil || os.IsExist(err) { switch i.IfExistsAction { case input.Overwrite: case input.Skip: @@ -141,6 +145,7 @@ func (s *Scaffold) doRender(i input.Input, e input.File, absPath string) error { var b []byte if c, ok := e.(CustomRenderer); ok { + c.SetFS(s.Fs) // CustomRenderers have a non-template method of file rendering. if b, err = c.CustomRender(); err != nil { return err @@ -168,13 +173,15 @@ func (s *Scaffold) doRender(i input.Input, e input.File, absPath string) error { } // Files being overwritten must be trucated to len 0 so no old bytes remain. 
- if _, err = os.Stat(absPath); err == nil && i.IfExistsAction == input.Overwrite { - if err = os.Truncate(absPath, 0); err != nil { - return err + if _, err = s.Fs.Stat(absPath); err == nil && i.IfExistsAction == input.Overwrite { + if file, ok := f.(afero.File); ok { + if err = file.Truncate(0); err != nil { + return err + } } } _, err = f.Write(b) - log.Infoln("Create", i.Path) + log.Infoln("Created", i.Path) return err } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/types.go b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/types.go index c4bacca9f7..98e129eb6a 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/types.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/scaffold/types.go @@ -32,7 +32,7 @@ type Types struct { func (s *Types) GetInput() (input.Input, error) { if s.Path == "" { s.Path = filepath.Join(ApisDir, - strings.ToLower(s.Resource.Group), + s.Resource.GoImportGroup, strings.ToLower(s.Resource.Version), s.Resource.LowerKind+"_types.go") } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/test/context.go b/vendor/github.com/operator-framework/operator-sdk/pkg/test/context.go index 4923c30746..9427faf245 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/test/context.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/test/context.go @@ -67,26 +67,19 @@ func (ctx *TestCtx) GetID() string { } func (ctx *TestCtx) Cleanup() { - for i := len(ctx.cleanupFns) - 1; i >= 0; i-- { - err := ctx.cleanupFns[i]() - if err != nil { - ctx.t.Errorf("A cleanup function failed with error: (%v)\n", err) - } - } -} - -// CleanupNoT is a modified version of Cleanup; does not use t for logging, instead uses log -// intended for use by MainEntry, which does not have a testing.T -func (ctx *TestCtx) CleanupNoT() { failed := false for i := len(ctx.cleanupFns) - 1; i >= 0; i-- { err := ctx.cleanupFns[i]() if err != nil { failed = true - log.Errorf("A cleanup function failed with error: (%v)", err) + if ctx.t != nil { + ctx.t.Errorf("A cleanup function failed with error: (%v)\n", err) + } else { + log.Errorf("A cleanup function failed with error: (%v)", err) + } } } - if failed { + if ctx.t == nil && failed { log.Fatal("A cleanup function failed") } } diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/test/e2eutil/wait_util.go b/vendor/github.com/operator-framework/operator-sdk/pkg/test/e2eutil/wait_util.go index e0d0879a16..9b155bd930 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/test/e2eutil/wait_util.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/test/e2eutil/wait_util.go @@ -15,6 +15,7 @@ package e2eutil import ( + "context" "testing" "time" @@ -22,8 +23,10 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" ) // WaitForDeployment checks to see if a given deployment has a certain number of available replicas after a specified amount of time @@ -67,3 +70,30 @@ func waitForDeployment(t *testing.T, kubeclient kubernetes.Interface, namespace, t.Logf("Deployment available (%d/%d)\n", replicas, replicas) return nil } + +func WaitForDeletion(t *testing.T, dynclient client.Client, obj runtime.Object, retryInterval, timeout time.Duration) error { + key, err := client.ObjectKeyFromObject(obj) + if err != nil { + return err + } + + 
kind := obj.GetObjectKind().GroupVersionKind().Kind + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err = wait.Poll(retryInterval, timeout, func() (done bool, err error) { + err = dynclient.Get(ctx, key, obj) + if apierrors.IsNotFound(err) { + return true, nil + } + if err != nil { + return false, err + } + t.Logf("Waiting for %s %s to be deleted\n", kind, key) + return false, nil + }) + if err != nil { + return err + } + t.Logf("%s %s was deleted\n", kind, key) + return nil +} diff --git a/vendor/github.com/operator-framework/operator-sdk/pkg/test/main_entry.go b/vendor/github.com/operator-framework/operator-sdk/pkg/test/main_entry.go index 45f7b79a59..ddabe5f38b 100644 --- a/vendor/github.com/operator-framework/operator-sdk/pkg/test/main_entry.go +++ b/vendor/github.com/operator-framework/operator-sdk/pkg/test/main_entry.go @@ -28,6 +28,7 @@ import ( "k8s.io/client-go/tools/clientcmd" + "github.com/operator-framework/operator-sdk/internal/util/projutil" "github.com/operator-framework/operator-sdk/pkg/k8sutil" "github.com/operator-framework/operator-sdk/pkg/scaffold" log "github.com/sirupsen/logrus" @@ -63,10 +64,16 @@ func MainEntry(m *testing.M) { var localCmd *exec.Cmd var localCmdOutBuf, localCmdErrBuf bytes.Buffer if *localOperator { - // TODO: make a generic 'up-local' function to deduplicate shared code between this and cmd/up/local - // taken from commands/operator-sdk/cmd/up/local.go - runArgs := append([]string{"run"}, []string{filepath.Join(scaffold.ManagerDir, scaffold.CmdFile)}...) - localCmd = exec.Command("go", runArgs...) + absProjectPath := projutil.MustGetwd() + projectName := filepath.Base(absProjectPath) + outputBinName := filepath.Join(scaffold.BuildBinDir, projectName+"-local") + args := []string{"build", "-o", outputBinName} + args = append(args, filepath.Join(scaffold.ManagerDir, scaffold.CmdFile)) + bc := exec.Command("go", args...) + if err := projutil.ExecCmd(bc); err != nil { + log.Fatalf("Failed to build local operator binary: %s", err) + } + localCmd = exec.Command(outputBinName) localCmd.Stdout = &localCmdOutBuf localCmd.Stderr = &localCmdErrBuf c := make(chan os.Signal) @@ -110,7 +117,7 @@ func MainEntry(m *testing.M) { log.Infof("Local operator stdout: %s", string(localCmdOutBuf.Bytes())) log.Infof("Local operator stderr: %s", string(localCmdErrBuf.Bytes())) } - ctx.CleanupNoT() + ctx.Cleanup() os.Exit(exitCode) }() // create crd diff --git a/vendor/github.com/operator-framework/operator-sdk/version/version.go b/vendor/github.com/operator-framework/operator-sdk/version/version.go index 1c6e0d77e6..9dfbc5334d 100644 --- a/vendor/github.com/operator-framework/operator-sdk/version/version.go +++ b/vendor/github.com/operator-framework/operator-sdk/version/version.go @@ -15,5 +15,5 @@ package version var ( - Version = "v0.5.0" + Version = "v0.6.0" )
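As a usage note for the metrics changes in this patch: below is a minimal sketch of how an operator's main function might call the updated helper after upgrading to operator-sdk v0.6.0. The port value 8383 and the logger name are assumptions, and the error handling is illustrative only.

package main

import (
	"context"

	"github.com/operator-framework/operator-sdk/pkg/metrics"
	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
)

var log = logf.Log.WithName("cmd")

func main() {
	ctx := context.TODO()

	// 8383 is an assumed value; use the port the operator actually serves metrics on.
	var metricsPort int32 = 8383

	// In v0.6.0, ExposeMetricsPort creates the metrics Service, updates it if it
	// already exists, and sets an owner reference so the Service is garbage-collected
	// along with the operator.
	svc, err := metrics.ExposeMetricsPort(ctx, metricsPort)
	if err != nil {
		log.Error(err, "Failed to expose metrics port")
	} else if svc != nil {
		log.Info("Metrics Service ensured", "Service.Name", svc.Name)
	}
}

When the operator is not running inside a cluster, the vendored helper logs "Skipping metrics Service creation; not running in a cluster." and no Service object is returned, which the nil check above accounts for.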