diff --git a/Makefile b/Makefile index ce88257e9..a7c8799c7 100644 --- a/Makefile +++ b/Makefile @@ -575,8 +575,11 @@ test-benchmark: ## Run the benchmark tests -- Note that this can only be ran for @echo "The pprof files generated are: cpu.prof and mem.prof" .PHONY: e2e -e2e: e2e-set-image prep-e2e ## Run full end-to-end tests that exercise content on an operational cluster. - @CONTENT_IMAGE=$(E2E_CONTENT_IMAGE_PATH) BROKEN_CONTENT_IMAGE=$(E2E_BROKEN_CONTENT_IMAGE_PATH) $(GO) test ./tests/e2e $(E2E_GO_TEST_FLAGS) -args $(E2E_ARGS) | tee tests/e2e-test.log +e2e: e2e-set-image prep-e2e e2e-parallel e2e-test-wait e2e-serial ## Run full end-to-end tests that exercise content on an operational cluster. + +.PHONY: e2e-test-wait +e2e-test-wait: + ./utils/e2e-test-wait.sh .PHONY: e2e-parallel e2e-parallel: e2e-set-image prep-e2e ## Run non-destructive end-to-end tests concurrently. diff --git a/tests/e2e/constants.go b/tests/e2e/constants.go deleted file mode 100644 index 27ef19f17..000000000 --- a/tests/e2e/constants.go +++ /dev/null @@ -1,19 +0,0 @@ -package e2e - -import "time" - -const ( - retryInterval = time.Second * 5 - timeout = time.Minute * 30 - cleanupRetryInterval = time.Second * 1 - cleanupTimeout = time.Minute * 5 - machineOperationTimeout = time.Minute * 25 - machineOperationRetryInterval = time.Second * 10 - maxRetries = 5 - workerPoolName = "worker" - testPoolName = "e2e" - testInvalidPoolName = "e2e-invalid" - rhcosContentFile = "ssg-rhcos4-ds.xml" - ocpContentFile = "ssg-ocp4-ds.xml" - unexistentResourceContentFile = "ocp4-unexistent-resource.xml" -) diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go deleted file mode 100644 index 7a07b7b7e..000000000 --- a/tests/e2e/e2e_test.go +++ /dev/null @@ -1,216 +0,0 @@ -package e2e - -import ( - goctx "context" - "fmt" - "testing" - - compv1alpha1 "github.com/ComplianceAsCode/compliance-operator/pkg/apis/compliance/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - - "github.com/ComplianceAsCode/compliance-operator/tests/e2e/framework" -) - -func TestE2E(t *testing.T) { - executeTests(t, - //testExecution{ - // Name: "TestNodeSchedulingErrorFailsTheScan", - // IsParallel: false, - // TestFn: func(t *testing.T, f *framework.Framework, ctx *framework.Context, namespace string) error { - // workerNodesLabel := map[string]string{ - // "node-role.kubernetes.io/worker": "", - // } - // workerNodes := getNodesWithSelectorOrFail(t, f, workerNodesLabel) - // // taintedNode := &workerNodes[0] - // taintKey := "co-e2e" - // taintVal := "val" - // taint := corev1.Taint{ - // Key: taintKey, - // Value: taintVal, - // Effect: corev1.TaintEffectNoSchedule, - // } - // if err := taintNode(t, f, taintedNode, taint); err != nil { - // E2ELog(t, "Tainting node failed") - // return err - // } - // suiteName := getObjNameFromTest(t) - // scanName := suiteName - // suite := &compv1alpha1.ComplianceSuite{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: suiteName, - // Namespace: namespace, - // }, - // Spec: compv1alpha1.ComplianceSuiteSpec{ - // Scans: []compv1alpha1.ComplianceScanSpecWrapper{ - // { - // ComplianceScanSpec: compv1alpha1.ComplianceScanSpec{ - // ContentImage: contentImagePath, - // Profile: "xccdf_org.ssgproject.content_profile_moderate", - // Rule: "xccdf_org.ssgproject.content_rule_no_netrc_files", - // Content: rhcosContentFile, - // NodeSelector: workerNodesLabel, - // ComplianceScanSettings: compv1alpha1.ComplianceScanSettings{ - // Debug: true, - // }, - // }, - // Name: scanName, - // }, - // },
- // }, - // } - // if err := f.Client.Create(goctx.TODO(), suite, getCleanupOpts(ctx)); err != nil { - // return err - // } - // // err := waitForSuiteScansStatus(t, f, namespace, suiteName, compv1alpha1.PhaseDone, compv1alpha1.ResultError) - // if err != nil { - // return err - // } - // return removeNodeTaint(t, f, taintedNode.Name, taintKey) - // }, - //}, - testExecution{ - Name: "TestKubeletConfigRemediation", - IsParallel: false, - TestFn: func(t *testing.T, f *framework.Framework, ctx *framework.Context, namespace string) error { - suiteName := "kubelet-remediation-test-suite" - - tp := &compv1alpha1.TailoredProfile{ - ObjectMeta: metav1.ObjectMeta{ - Name: suiteName, - Namespace: namespace, - }, - Spec: compv1alpha1.TailoredProfileSpec{ - Title: "kubelet-remediation-test", - Description: "A test tailored profile to test kubelet remediation", - EnableRules: []compv1alpha1.RuleReferenceSpec{ - { - Name: "ocp4-kubelet-enable-streaming-connections", - Rationale: "To be tested", - }, - { - Name: "ocp4-version-detect-in-ocp", - Rationale: "To be tested", - }, - }, - SetValues: []compv1alpha1.VariableValueSpec{ - { - Name: "ocp4-var-streaming-connection-timeouts", - Rationale: "Value to be set", - Value: "8h0m0s", - }, - { - Name: "ocp4-var-role-master", - Rationale: "Value to be set", - Value: testPoolName, - }, - { - Name: "ocp4-var-role-worker", - Rationale: "Value to be set", - Value: testPoolName, - }, - }, - }, - } - createTPErr := f.Client.Create(goctx.TODO(), tp, getCleanupOpts(ctx)) - if createTPErr != nil { - return createTPErr - } - - ssb := &compv1alpha1.ScanSettingBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: suiteName, - Namespace: namespace, - }, - Profiles: []compv1alpha1.NamedObjectReference{ - { - APIGroup: "compliance.openshift.io/v1alpha1", - Kind: "TailoredProfile", - Name: suiteName, - }, - }, - SettingsRef: &compv1alpha1.NamedObjectReference{ - APIGroup: "compliance.openshift.io/v1alpha1", - Kind: "ScanSetting", - Name: "e2e-default-auto-apply", - }, - } - - err := f.Client.Create(goctx.TODO(), ssb, getCleanupOpts(ctx)) - if err != nil { - return err - } - - // Ensure that all the scans in the suite have finished and are marked as Done - err = waitForSuiteScansStatus(t, f, namespace, suiteName, compv1alpha1.PhaseDone, compv1alpha1.ResultNonCompliant) - if err != nil { - return err - } - - scanName := suiteName - - // We need to check that the remediation is auto-applied and save - // the object so we can delete it later - remName := scanName + "-kubelet-enable-streaming-connections" - waitForGenericRemediationToBeAutoApplied(t, f, remName, namespace) - - err = reRunScan(t, f, scanName, namespace) - if err != nil { - return err - } - - // Scan has been re-started - E2ELogf(t, "Scan phase should be reset") - err = waitForSuiteScansStatus(t, f, namespace, suiteName, compv1alpha1.PhaseRunning, compv1alpha1.ResultNotAvailable) - if err != nil { - return err - } - - // Ensure that all the scans in the suite have finished and are marked as Done - E2ELogf(t, "Let's wait for it to be done now") - err = waitForSuiteScansStatus(t, f, namespace, suiteName, compv1alpha1.PhaseDone, compv1alpha1.ResultCompliant) - if err != nil { - return err - } - E2ELogf(t, "scan re-run has finished") - - // Now the check should be passing - checkResult := compv1alpha1.ComplianceCheckResult{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-kubelet-enable-streaming-connections", suiteName), - Namespace: namespace, - }, - ID: 
"xccdf_org.ssgproject.content_rule_kubelet_enable_streaming_connections", - Status: compv1alpha1.CheckResultPass, - Severity: compv1alpha1.CheckResultSeverityMedium, - } - err = assertHasCheck(f, suiteName, scanName, checkResult) - if err != nil { - return err - } - - err = assertHasCheck(f, suiteName, scanName, checkResult) - if err != nil { - return err - } - - // The remediation must not be Outdated - remediation := &compv1alpha1.ComplianceRemediation{} - remNsName := types.NamespacedName{ - Name: remName, - Namespace: namespace, - } - err = f.Client.Get(goctx.TODO(), remNsName, remediation) - if err != nil { - return fmt.Errorf("couldn't get remediation %s: %w", remName, err) - } - if remediation.Status.ApplicationState != compv1alpha1.RemediationApplied { - return fmt.Errorf("remediation %s is not applied, but %s", remName, remediation.Status.ApplicationState) - } - - E2ELogf(t, "The test succeeded!") - return nil - }, - }, - ) -} diff --git a/tests/e2e/e2eutil/wait_util.go b/tests/e2e/e2eutil/wait_util.go deleted file mode 100644 index 2dc800a5d..000000000 --- a/tests/e2e/e2eutil/wait_util.go +++ /dev/null @@ -1,87 +0,0 @@ -package e2eutil - -import ( - "context" - "testing" - "time" - - test "github.com/ComplianceAsCode/compliance-operator/tests/e2e/framework" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// WaitForDeployment checks to see if a given deployment has a certain number of available replicas after a specified -// amount of time. If the deployment does not have the required number of replicas after 5 * retries seconds, -// the function returns an error. This can be used in multiple ways, like verifying that a required resource is ready -// before trying to use it, or to test. Failure handling, like simulated in SimulatePodFail. 
-func WaitForDeployment(t *testing.T, kubeclient kubernetes.Interface, namespace, name string, replicas int, - retryInterval, timeout time.Duration) error { - return waitForDeployment(t, kubeclient, namespace, name, replicas, retryInterval, timeout, false) -} - -// WaitForOperatorDeployment has the same functionality as WaitForDeployment but will no wait for the deployment if the -// test was run with a locally run operator (--up-local flag) -func WaitForOperatorDeployment(t *testing.T, kubeclient kubernetes.Interface, namespace, name string, replicas int, - retryInterval, timeout time.Duration) error { - return waitForDeployment(t, kubeclient, namespace, name, replicas, retryInterval, timeout, true) -} - -func waitForDeployment(t *testing.T, kubeclient kubernetes.Interface, namespace, name string, replicas int, - retryInterval, timeout time.Duration, isOperator bool) error { - if isOperator && test.Global.LocalOperator { - t.Log("Operator is running locally; skip waitForDeployment") - return nil - } - err := wait.Poll(retryInterval, timeout, func() (done bool, err error) { - deployment, err := kubeclient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - t.Logf("Waiting for availability of Deployment: %s in Namespace: %s \n", name, namespace) - return false, nil - } - return false, err - } - - if int(deployment.Status.AvailableReplicas) >= replicas { - return true, nil - } - t.Logf("Waiting for full availability of %s deployment (%d/%d)\n", name, - deployment.Status.AvailableReplicas, replicas) - return false, nil - }) - if err != nil { - return err - } - t.Logf("Deployment available (%d/%d)\n", replicas, replicas) - return nil -} - -func WaitForDeletion(t *testing.T, dynclient client.Client, obj client.Object, retryInterval, - timeout time.Duration) error { - - key := client.ObjectKeyFromObject(obj) - - kind := obj.GetObjectKind().GroupVersionKind().Kind - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - err := wait.Poll(retryInterval, timeout, func() (done bool, err error) { - err = dynclient.Get(ctx, key, obj) - if apierrors.IsNotFound(err) { - return true, nil - } - if err != nil { - return false, err - } - t.Logf("Waiting for %s %s to be deleted\n", kind, key) - return false, nil - }) - if err != nil { - return err - } - t.Logf("%s %s was deleted\n", kind, key) - return nil -} diff --git a/tests/e2e/framework/framework.go b/tests/e2e/framework/framework.go index d502267ff..dcaeb7b2f 100644 --- a/tests/e2e/framework/framework.go +++ b/tests/e2e/framework/framework.go @@ -1,23 +1,18 @@ package framework import ( - "bytes" goctx "context" "flag" "fmt" "os" - "os/exec" "path/filepath" - "strings" "sync" - "testing" "time" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 
_ "k8s.io/client-go/plugin/pkg/client/auth" "github.com/pborman/uuid" - log "github.com/sirupsen/logrus" extscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -26,7 +21,6 @@ import ( cgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" - "k8s.io/client-go/tools/clientcmd" dynclient "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -230,69 +224,4 @@ func (f *Framework) addToScheme(addToScheme addToSchemeFunc, obj dynclient.Objec return nil } -func (f *Framework) runM(m *testing.M) (int, error) { - if !f.LocalOperator { - return m.Run(), nil - } - - // start local operator before running tests - outBuf := &bytes.Buffer{} - localCmd, err := f.setupLocalCommand() - if err != nil { - return 0, fmt.Errorf("failed to setup local command: %w", err) - } - localCmd.Stdout = outBuf - localCmd.Stderr = outBuf - - err = localCmd.Start() - if err != nil { - return 0, fmt.Errorf("failed to run operator locally: %w", err) - } - log.Info("Started local operator") - - // run the tests - exitCode := m.Run() - - // kill the local operator and print its logs - err = localCmd.Process.Kill() - if err != nil { - log.Warn("Failed to stop local operator process") - } - fmt.Printf("\n------ Local operator output ------\n%s\n", outBuf.String()) - return exitCode, nil -} - -func (f *Framework) setupLocalCommand() (*exec.Cmd, error) { - projectName := filepath.Base(MustGetwd()) - outputBinName := filepath.Join(BuildBinDir, projectName+"-local") - opts := GoCmdOptions{ - BinName: outputBinName, - PackagePath: filepath.Join(GetGoPkg(), filepath.ToSlash(ManagerDir)), - } - if err := GoBuild(opts); err != nil { - return nil, fmt.Errorf("failed to build local operator binary: %w", err) - } - - args := []string{} - if f.localOperatorArgs != "" { - args = append(args, strings.Split(f.localOperatorArgs, " ")...) - } - localCmd := exec.Command(outputBinName, args...) - - if f.kubeconfigPath != "" { - localCmd.Env = append(os.Environ(), fmt.Sprintf("%v=%v", KubeConfigEnvVar, f.kubeconfigPath)) - } else { - // we can hardcode index 0 as that is the highest priority kubeconfig to be loaded and will always - // be populated by NewDefaultClientConfigLoadingRules() - localCmd.Env = append(os.Environ(), fmt.Sprintf("%v=%v", KubeConfigEnvVar, - clientcmd.NewDefaultClientConfigLoadingRules().Precedence[0])) - } - watchNamespace := f.OperatorNamespace - ns, ok := os.LookupEnv(TestWatchNamespaceEnv) - if ok { - watchNamespace = ns - } - localCmd.Env = append(localCmd.Env, fmt.Sprintf("%v=%v", WatchNamespaceEnvVar, watchNamespace)) - return localCmd, nil -} diff --git a/tests/e2e/framework/main_entry.go b/tests/e2e/framework/main_entry.go index 5560a6422..3c6e21645 100644 --- a/tests/e2e/framework/main_entry.go +++ b/tests/e2e/framework/main_entry.go @@ -6,7 +6,6 @@ import ( "fmt" "log" "os" - "testing" "time" compv1alpha1 "github.com/ComplianceAsCode/compliance-operator/pkg/apis/compliance/v1alpha1" @@ -40,53 +39,6 @@ func (f *Framework) CleanUpOnError() bool { return f.cleanupOnError } -func MainEntry(m *testing.M) { - fopts := &frameworkOpts{} - fopts.addToFlagSet(flag.CommandLine) - // controller-runtime registers the --kubeconfig flag in client config - // package: - // https://github.com/kubernetes-sigs/controller-runtime/blob/v0.5.2/pkg/client/config/config.go#L39 - // - // If this flag is not registered, do so. Otherwise retrieve its value. 
- kcFlag := flag.Lookup(KubeConfigFlag) - if kcFlag == nil { - flag.StringVar(&fopts.kubeconfigPath, KubeConfigFlag, "", "path to kubeconfig") - } - - flag.Parse() - - if kcFlag != nil { - fopts.kubeconfigPath = kcFlag.Value.String() - } - - f, err := newFramework(fopts) - if err != nil { - log.Fatalf("Failed to create framework: %v", err) - } - - Global = f - - // Do suite setup - if err := f.SetUp(); err != nil { - log.Fatal(err) - } - - // Run the tests - exitCode, err := f.runM(m) - if err != nil { - log.Fatal(err) - } - - // Do suite teardown only if we have a successful test run or if we don't care - // about removing the test resources if the test failed. - if exitCode == 0 || (exitCode > 0 && f.cleanupOnError) { - if err = f.TearDown(); err != nil { - log.Fatal(err) - } - } - os.Exit(exitCode) -} - func (f *Framework) SetUp() error { log.Printf("switching to %s directory to setup and execute tests", f.projectRoot) err := os.Chdir(f.projectRoot) diff --git a/tests/e2e/helpers.go b/tests/e2e/helpers.go deleted file mode 100644 index 4d172afea..000000000 --- a/tests/e2e/helpers.go +++ /dev/null @@ -1,475 +0,0 @@ -package e2e - -import ( - "bytes" - goctx "context" - "errors" - "fmt" - "io" - "os" - "path" - "testing" - "time" - - backoff "github.com/cenkalti/backoff/v4" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "sigs.k8s.io/controller-runtime/pkg/client" - - compv1alpha1 "github.com/ComplianceAsCode/compliance-operator/pkg/apis/compliance/v1alpha1" - "github.com/ComplianceAsCode/compliance-operator/tests/e2e/framework" - mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" -) - -var contentImagePath string -var shouldLogContainerOutput bool -var brokenContentImagePath string - -var defaultBackoff = backoff.WithMaxRetries(backoff.NewExponentialBackOff(), maxRetries) - -type ObjectResouceVersioner interface { - client.Object - metav1.Common -} - -func init() { - contentImagePath = os.Getenv("CONTENT_IMAGE") - - if contentImagePath == "" { - fmt.Println("Please set the 'CONTENT_IMAGE' environment variable") - os.Exit(1) - } - - logContainerOutputEnv := os.Getenv("LOG_CONTAINER_OUTPUT") - if logContainerOutputEnv != "" { - shouldLogContainerOutput = true - } - - brokenContentImagePath = os.Getenv("BROKEN_CONTENT_IMAGE") - - if brokenContentImagePath == "" { - fmt.Println("Please set the 'BROKEN_CONTENT_IMAGE' environment variable") - os.Exit(1) - } -} - -type testExecution struct { - Name string - IsParallel bool - TestFn func(*testing.T, *framework.Framework, *framework.Context, string) error -} - -func E2ELogf(t *testing.T, format string, args ...interface{}) { - t.Helper() - t.Logf(fmt.Sprintf("%s: %s", time.Now().Format(time.RFC3339), format), args...) -} - -func E2ELog(t *testing.T, args ...interface{}) { - t.Helper() - t.Log(fmt.Sprintf("%s: %s", time.Now().Format(time.RFC3339), fmt.Sprint(args...))) -} - -func E2EErrorf(t *testing.T, format string, args ...interface{}) { - t.Helper() - t.Errorf(fmt.Sprintf("E2E-FAILURE: %s: %s", time.Now().Format(time.RFC3339), format), args...) -} - -func E2EFatalf(t *testing.T, format string, args ...interface{}) { - t.Helper() - t.Fatalf(fmt.Sprintf("E2E-FAILURE: %s: %s", time.Now().Format(time.RFC3339), format), args...) 
-} - -// executeTest sets up everything that a e2e test needs to run, and executes the test. -func executeTests(t *testing.T, tests ...testExecution) { - // get global framework variables - f := framework.Global - ctx := framework.NewContext(t) - defer ctx.Cleanup() - - ns := f.OperatorNamespace - - // This context doesn't really do anything since we've already created - // the machine config pools in the framework setUp(). We can remove - // this when we flatten the tests. - testtype := ctx.GetTestType() - if testtype == framework.TestTypeAll || testtype == framework.TestTypeParallel { - t.Run("Parallel tests", func(t *testing.T) { - for _, test := range tests { - // Don't lose test reference - test := test - if test.IsParallel { - t.Run(test.Name, func(tt *testing.T) { - tt.Parallel() - if err := test.TestFn(tt, f, ctx, ns); err != nil { - tt.Error(err) - } - }) - } - } - }) - } else { - t.Log("Skipping parallel tests") - } - - if testtype == framework.TestTypeAll || testtype == framework.TestTypeSerial { - t.Run("Serial tests", func(t *testing.T) { - for _, test := range tests { - // Don't lose test reference - test := test - if !test.IsParallel { - t.Run(test.Name, func(t *testing.T) { - if err := test.TestFn(t, f, ctx, ns); err != nil { - t.Error(err) - } - }) - } - } - }) - } else { - t.Log("Skipping serial tests") - } -} - -func getCleanupOpts(ctx *framework.Context) *framework.CleanupOptions { - return &framework.CleanupOptions{ - TestContext: ctx, - Timeout: cleanupTimeout, - RetryInterval: cleanupRetryInterval, - } -} - -// waitForScanStatus will poll until the compliancescan that we're lookingfor reaches a certain status, or until -// a timeout is reached. -func waitForScanStatus(t *testing.T, f *framework.Framework, namespace, name string, targetStatus compv1alpha1.ComplianceScanStatusPhase) { - exampleComplianceScan := &compv1alpha1.ComplianceScan{} - var lastErr error - defer logContainerOutput(t, f, namespace, name) - // retry and ignore errors until timeout - timeoutErr := wait.Poll(retryInterval, timeout, func() (bool, error) { - lastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, exampleComplianceScan) - if lastErr != nil { - if apierrors.IsNotFound(lastErr) { - E2ELogf(t, "Waiting for availability of %s compliancescan\n", name) - return false, nil - } - E2ELogf(t, "Retrying. Got error: %v\n", lastErr) - return false, nil - } - - if exampleComplianceScan.Status.Phase == targetStatus { - return true, nil - } - E2ELogf(t, "Waiting for run of %s compliancescan (%s)\n", name, exampleComplianceScan.Status.Phase) - return false, nil - }) - - assertNoErrorNorTimeout(t, lastErr, timeoutErr, "waiting for compliance status") - - E2ELogf(t, "ComplianceScan ready (%s)\n", exampleComplianceScan.Status.Phase) -} - -// waitForScanStatus will poll until the compliancescan that we're lookingfor reaches a certain status, or until -// a timeout is reached. 
-func waitForSuiteScansStatus(t *testing.T, f *framework.Framework, namespace, name string, targetStatus compv1alpha1.ComplianceScanStatusPhase, targetComplianceStatus compv1alpha1.ComplianceScanStatusResult) error { - suite := &compv1alpha1.ComplianceSuite{} - var lastErr error - // retry and ignore errors until timeout - defer logContainerOutput(t, f, namespace, name) - timeouterr := wait.Poll(retryInterval, timeout, func() (bool, error) { - lastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, suite) - if lastErr != nil { - if apierrors.IsNotFound(lastErr) { - E2ELogf(t, "Waiting for availability of %s compliancesuite\n", name) - return false, nil - } - E2ELogf(t, "Retrying. Got error: %v\n", lastErr) - return false, nil - } - - if suite.Status.Phase != targetStatus { - E2ELogf(t, "Waiting until suite %s reaches target status '%s'. Current status: %s", suite.Name, targetStatus, suite.Status.Phase) - return false, nil - } - - // The suite is now done, make sure the compliance status is expected - if suite.Status.Result != targetComplianceStatus { - return false, fmt.Errorf("expecting %s got %s", targetComplianceStatus, suite.Status.Result) - } - - // If we were expecting an error, there's no use checking the scans - if targetComplianceStatus == compv1alpha1.ResultError { - return true, nil - } - - // Now as a sanity check make sure that the scan statuses match the aggregated - // suite status - - // Got the suite. There should be at least one scan or else we're still initialising - if len(suite.Status.ScanStatuses) < 1 { - return false, errors.New("not enough scan statuses") - } - - //Examine the scan status both in the suite status and the scan - for _, scanStatus := range suite.Status.ScanStatuses { - if scanStatus.Phase != targetStatus { - return false, fmt.Errorf("suite in status %s but scan wrapper %s in status %s", targetStatus, scanStatus.Name, scanStatus.Phase) - } - - // If the status was present in the suite, then /any/ error - // should fail the test as the scans should be read /from/ - // the scan itself - waitForScanStatus(t, f, namespace, scanStatus.Name, targetStatus) - } - - return true, nil - }) - - // Error in function call - if lastErr != nil { - return lastErr - } - - // Timeout - if timeouterr != nil { - return timeouterr - } - - E2ELogf(t, "All scans in ComplianceSuite have finished (%s)\n", suite.Name) - return nil -} - -func getPodsForScan(f *framework.Framework, scanName string) ([]corev1.Pod, error) { - selectPods := map[string]string{ - compv1alpha1.ComplianceScanLabel: scanName, - } - var pods corev1.PodList - lo := &client.ListOptions{ - LabelSelector: labels.SelectorFromSet(selectPods), - } - err := f.Client.List(goctx.TODO(), &pods, lo) - if err != nil { - return nil, err - } - return pods.Items, nil -} - -func assertHasCheck(f *framework.Framework, suiteName, scanName string, check compv1alpha1.ComplianceCheckResult) error { - var getCheck compv1alpha1.ComplianceCheckResult - - err := f.Client.Get(goctx.TODO(), types.NamespacedName{Name: check.Name, Namespace: check.Namespace}, &getCheck) - if err != nil { - return err - } - - if getCheck.Status != check.Status { - return fmt.Errorf("expected result %s got result %s", check.Status, getCheck.Status) - } - - if getCheck.ID != check.ID { - return fmt.Errorf("expected ID %s got ID %s", check.ID, getCheck.ID) - } - - if getCheck.Labels == nil { - return fmt.Errorf("complianceCheckResult has no labels") - } - - if getCheck.Labels[compv1alpha1.SuiteLabel] != suiteName { - 
return fmt.Errorf("Did not find expected suite name label %s, found %s", suiteName, getCheck.Labels[compv1alpha1.SuiteLabel]) - } - - if getCheck.Labels[compv1alpha1.ComplianceScanLabel] != scanName { - return fmt.Errorf("Did not find expected scan name label %s, found %s", scanName, getCheck.Labels[compv1alpha1.ComplianceScanLabel]) - } - - if getCheck.Labels[compv1alpha1.ComplianceCheckResultSeverityLabel] != string(getCheck.Severity) { - return fmt.Errorf("did not find expected severity name label %s, found %s", suiteName, getCheck.Labels[compv1alpha1.ComplianceCheckResultSeverityLabel]) - } - - if getCheck.Labels[compv1alpha1.ComplianceCheckResultStatusLabel] != string(getCheck.Status) { - return fmt.Errorf("did not find expected status name label %s, found %s", suiteName, getCheck.Labels[compv1alpha1.ComplianceCheckResultStatusLabel]) - } - - return nil -} - -// waitForNodesToBeReady waits until all the nodes in the cluster have -// reached the expected machineConfig. -func waitForNodesToBeReady(t *testing.T, f *framework.Framework, errorMessage string) { - err := wait.PollImmediate(machineOperationRetryInterval, machineOperationTimeout, func() (bool, error) { - var nodes corev1.NodeList - - f.Client.List(goctx.TODO(), &nodes, &client.ListOptions{}) - for _, node := range nodes.Items { - E2ELogf(t, "Node %s has config %s, desired config %s state %s", - node.Name, - node.Annotations["machineconfiguration.openshift.io/currentConfig"], - node.Annotations["machineconfiguration.openshift.io/desiredConfig"], - node.Annotations["machineconfiguration.openshift.io/state"]) - - if (node.Annotations["machineconfiguration.openshift.io/currentConfig"] != node.Annotations["machineconfiguration.openshift.io/desiredConfig"]) || - (node.Annotations["machineconfiguration.openshift.io/state"] != "Done") { - E2ELogf(t, "Node %s still updating", node.Name) - return false, nil - } - E2ELogf(t, "Node %s was updated", node.Name) - } - - E2ELogf(t, "All machines updated") - return true, nil - }) - - if err != nil { - E2EFatalf(t, "%s: %s", errorMessage, err) - } -} - -func waitForGenericRemediationToBeAutoApplied(t *testing.T, f *framework.Framework, remName, remNamespace string) { - rem := &compv1alpha1.ComplianceRemediation{} - var lastErr error - timeouterr := wait.Poll(retryInterval, timeout, func() (bool, error) { - lastErr = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: remName, Namespace: remNamespace}, rem) - if apierrors.IsNotFound(lastErr) { - E2ELogf(t, "Waiting for availability of %s remediation\n", remName) - return false, nil - } - if lastErr != nil { - E2ELogf(t, "Retrying. Got error: %v\n", lastErr) - return false, nil - } - E2ELogf(t, "Found remediation: %s\n", remName) - if rem.Status.ApplicationState == compv1alpha1.RemediationNotApplied || rem.Status.ApplicationState == compv1alpha1.RemediationPending { - E2ELogf(t, "Retrying. remediation not yet applied. Remediation Name: %s, ApplicationState: %s\n", remName, rem.Status.ApplicationState) - } - // wait for the remediation to get applied - time.Sleep(5 * time.Second) - return true, nil - }) - assertNoErrorNorTimeout(t, lastErr, timeouterr, "getting remediation before auto-applying it") - E2ELogf(t, "Machines updated with remediation") - waitForNodesToBeReady(t, f, "Failed to wait for nodes to come back up after auto-applying remediation") -} - -// IsMachineConfigPoolConditionPresentAndEqual returns true when conditionType is present and equal to status. 
-func IsMachineConfigPoolConditionPresentAndEqual(conditions []mcfgv1.MachineConfigPoolCondition, conditionType mcfgv1.MachineConfigPoolConditionType, status corev1.ConditionStatus) bool { - for _, condition := range conditions { - if condition.Type == conditionType { - return condition.Status == status - } - } - return false -} - -func writeToArtifactsDir(dir, scan, pod, container, log string) error { - logPath := path.Join(dir, fmt.Sprintf("%s_%s_%s.log", scan, pod, container)) - logFile, err := os.Create(logPath) - if err != nil { - return err - } - // #nosec G307 - defer logFile.Close() - _, err = io.WriteString(logFile, log) - if err != nil { - return err - } - logFile.Sync() - return nil -} - -func logContainerOutput(t *testing.T, f *framework.Framework, namespace, name string) { - if shouldLogContainerOutput == false { - return - } - - // Try all container/init variants for each pod and the pod itself (self), log nothing if the container is not applicable. - containers := []string{"self", "api-resource-collector", "log-collector", "scanner", "content-container"} - artifacts := os.Getenv("ARTIFACT_DIR") - if artifacts == "" { - return - } - pods, err := getPodsForScan(f, name) - if err != nil { - E2ELogf(t, "Warning: Error getting pods for container logging: %s", err) - } else { - for _, pod := range pods { - for _, con := range containers { - logOpts := &corev1.PodLogOptions{} - if con != "self" { - logOpts.Container = con - } - req := f.KubeClient.CoreV1().Pods(namespace).GetLogs(pod.Name, logOpts) - podLogs, err := req.Stream(goctx.TODO()) - if err != nil { - // Silence this error if the container is not valid for the pod - if !apierrors.IsBadRequest(err) { - E2ELogf(t, "error getting logs for %s/%s: reason: %v, err: %v", pod.Name, con, apierrors.ReasonForError(err), err) - } - continue - } - buf := new(bytes.Buffer) - _, err = io.Copy(buf, podLogs) - if err != nil { - E2ELogf(t, "error copying logs for %s/%s: %v", pod.Name, con, err) - continue - } - logs := buf.String() - if len(logs) == 0 { - E2ELogf(t, "no logs for %s/%s", pod.Name, con) - } else { - err := writeToArtifactsDir(artifacts, name, pod.Name, con, logs) - if err != nil { - E2ELogf(t, "error writing logs for %s/%s: %v", pod.Name, con, err) - } else { - E2ELogf(t, "wrote logs for %s/%s", pod.Name, con) - } - } - } - } - } -} - -func reRunScan(t *testing.T, f *framework.Framework, scanName, namespace string) error { - scanKey := types.NamespacedName{Name: scanName, Namespace: namespace} - err := backoff.Retry(func() error { - foundScan := &compv1alpha1.ComplianceScan{} - geterr := f.Client.Get(goctx.TODO(), scanKey, foundScan) - if geterr != nil { - return geterr - } - - scapCopy := foundScan.DeepCopy() - if scapCopy.Annotations == nil { - scapCopy.Annotations = make(map[string]string) - } - scapCopy.Annotations[compv1alpha1.ComplianceScanRescanAnnotation] = "" - return f.Client.Update(goctx.TODO(), scapCopy) - }, defaultBackoff) - - if err != nil { - return fmt.Errorf("couldn't update scan to re-launch it: %w", err) - } - - E2ELogf(t, "Scan re-launched") - return nil -} - -func assertNoErrorNorTimeout(t *testing.T, err, timeoutErr error, message string) { - if finalErr := processErrorOrTimeout(err, timeoutErr, message); finalErr != nil { - E2EFatalf(t, "%s", finalErr) - } -} - -func processErrorOrTimeout(err, timeoutErr error, message string) error { - // Error in function call - if err != nil { - return fmt.Errorf("Got error when %s: %w", message, err) - } - // Timeout - if timeoutErr != nil { - return 
fmt.Errorf("Timed out when %s: %w", message, timeoutErr) - } - return nil -} diff --git a/tests/e2e/main_test.go b/tests/e2e/main_test.go deleted file mode 100644 index 021c04576..000000000 --- a/tests/e2e/main_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package e2e - -import ( - "testing" - - "github.com/ComplianceAsCode/compliance-operator/tests/e2e/framework" -) - -func TestMain(m *testing.M) { - framework.MainEntry(m) -} diff --git a/tests/e2e/serial/main_test.go b/tests/e2e/serial/main_test.go index d1c578e20..9ff7da960 100644 --- a/tests/e2e/serial/main_test.go +++ b/tests/e2e/serial/main_test.go @@ -1251,3 +1251,203 @@ func TestVariableTemplate(t *testing.T) { t.Fatal(err) } } + +func TestKubeletConfigRemediation(t *testing.T) { + f := framework.Global + suiteName := "kubelet-remediation-test-suite" + + tp := &compv1alpha1.TailoredProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: suiteName, + Namespace: f.OperatorNamespace, + }, + Spec: compv1alpha1.TailoredProfileSpec{ + Title: "kubelet-remediation-test", + Description: "A test tailored profile to test kubelet remediation", + EnableRules: []compv1alpha1.RuleReferenceSpec{ + { + Name: "ocp4-kubelet-enable-streaming-connections", + Rationale: "To be tested", + }, + { + Name: "ocp4-version-detect-in-ocp", + Rationale: "To be tested", + }, + }, + SetValues: []compv1alpha1.VariableValueSpec{ + { + Name: "ocp4-var-streaming-connection-timeouts", + Rationale: "Value to be set", + Value: "8h0m0s", + }, + { + Name: "ocp4-var-role-master", + Rationale: "Value to be set", + Value: framework.TestPoolName, + }, + { + Name: "ocp4-var-role-worker", + Rationale: "Value to be set", + Value: framework.TestPoolName, + }, + }, + }, + } + createTPErr := f.Client.Create(context.TODO(), tp, nil) + if createTPErr != nil { + t.Fatal(createTPErr) + } + defer f.Client.Delete(context.TODO(), tp) + + ssb := &compv1alpha1.ScanSettingBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: suiteName, + Namespace: f.OperatorNamespace, + }, + Profiles: []compv1alpha1.NamedObjectReference{ + { + APIGroup: "compliance.openshift.io/v1alpha1", + Kind: "TailoredProfile", + Name: suiteName, + }, + }, + SettingsRef: &compv1alpha1.NamedObjectReference{ + APIGroup: "compliance.openshift.io/v1alpha1", + Kind: "ScanSetting", + Name: "e2e-default-auto-apply", + }, + } + + err := f.Client.Create(context.TODO(), ssb, nil) + if err != nil { + t.Fatal(err) + } + defer f.Client.Delete(context.TODO(), ssb) + + // Ensure that all the scans in the suite have finished and are marked as Done + err = f.WaitForSuiteScansStatus(f.OperatorNamespace, suiteName, compv1alpha1.PhaseDone, compv1alpha1.ResultNonCompliant) + if err != nil { + t.Fatal(err) + } + + scanName := suiteName + + // We need to check that the remediation is auto-applied and save + // the object so we can delete it later + remName := scanName + "-kubelet-enable-streaming-connections" + f.WaitForGenericRemediationToBeAutoApplied(remName, f.OperatorNamespace) + err = f.WaitForGenericRemediationToBeAutoApplied(remName, f.OperatorNamespace) + if err != nil { + t.Fatal(err) + } + + err = f.ReRunScan(scanName, f.OperatorNamespace) + if err != nil { + t.Fatal(err) + } + + // Scan has been re-started + log.Printf("scan phase should be reset") + err = f.WaitForSuiteScansStatus(f.OperatorNamespace, suiteName, compv1alpha1.PhaseRunning, compv1alpha1.ResultNotAvailable) + if err != nil { + t.Fatal(err) + } + + // Ensure that all the scans in the suite have finished and are marked as Done + log.Printf("let's wait for it to be done now") + err = 
f.WaitForSuiteScansStatus(f.OperatorNamespace, suiteName, compv1alpha1.PhaseDone, compv1alpha1.ResultCompliant) + if err != nil { + t.Fatal(err) + } + log.Printf("scan re-run has finished") + + // Now the check should be passing + checkResult := compv1alpha1.ComplianceCheckResult{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-kubelet-enable-streaming-connections", suiteName), + Namespace: f.OperatorNamespace, + }, + ID: "xccdf_org.ssgproject.content_rule_kubelet_enable_streaming_connections", + Status: compv1alpha1.CheckResultPass, + Severity: compv1alpha1.CheckResultSeverityMedium, + } + err = f.AssertHasCheck(suiteName, scanName, checkResult) + if err != nil { + t.Fatal(err) + } + + err = f.AssertHasCheck(suiteName, scanName, checkResult) + if err != nil { + t.Fatal(err) + } + + // The remediation must not be Outdated + remediation := &compv1alpha1.ComplianceRemediation{} + remNsName := types.NamespacedName{ + Name: remName, + Namespace: f.OperatorNamespace, + } + err = f.Client.Get(context.TODO(), remNsName, remediation) + if err != nil { + t.Fatalf("couldn't get remediation %s: %s", remName, err) + } + if remediation.Status.ApplicationState != compv1alpha1.RemediationApplied { + t.Fatalf("remediation %s is not applied, but %s", remName, remediation.Status.ApplicationState) + } +} + +//testExecution{ +// Name: "TestNodeSchedulingErrorFailsTheScan", +// IsParallel: false, +// TestFn: func(t *testing.T, f *framework.Framework, ctx *framework.Context, namespace string) error { +// workerNodesLabel := map[string]string{ +// "node-role.kubernetes.io/worker": "", +// } +// workerNodes := getNodesWithSelectorOrFail(t, f, workerNodesLabel) +// // taintedNode := &workerNodes[0] +// taintKey := "co-e2e" +// taintVal := "val" +// taint := corev1.Taint{ +// Key: taintKey, +// Value: taintVal, +// Effect: corev1.TaintEffectNoSchedule, +// } +// if err := taintNode(t, f, taintedNode, taint); err != nil { +// E2ELog(t, "Tainting node failed") +// return err +// } +// suiteName := getObjNameFromTest(t) +// scanName := suiteName +// suite := &compv1alpha1.ComplianceSuite{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: suiteName, +// Namespace: namespace, +// }, +// Spec: compv1alpha1.ComplianceSuiteSpec{ +// Scans: []compv1alpha1.ComplianceScanSpecWrapper{ +// { +// ComplianceScanSpec: compv1alpha1.ComplianceScanSpec{ +// ContentImage: contentImagePath, +// Profile: "xccdf_org.ssgproject.content_profile_moderate", +// Rule: "xccdf_org.ssgproject.content_rule_no_netrc_files", +// Content: rhcosContentFile, +// NodeSelector: workerNodesLabel, +// ComplianceScanSettings: compv1alpha1.ComplianceScanSettings{ +// Debug: true, +// }, +// }, +// Name: scanName, +// }, +// }, +// }, +// } +// if err := f.Client.Create(goctx.TODO(), suite, getCleanupOpts(ctx)); err != nil { +// return err +// } +// // err := waitForSuiteScansStatus(t, f, namespace, suiteName, compv1alpha1.PhaseDone, compv1alpha1.ResultError) +// if err != nil { +// return err +// } +// return removeNodeTaint(t, f, taintedNode.Name, taintKey) +// }, +//}, diff --git a/utils/e2e-test-wait.sh b/utils/e2e-test-wait.sh new file mode 100755 index 000000000..f383a5e4b --- /dev/null +++ b/utils/e2e-test-wait.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +# Wait up to two minutes for compliance operator CRDs to cleanup. +i=0 +w=10 +while [[ $i -lt 12 ]]; do + if [[ $(oc api-resources --api-group=compliance.openshift.io --no-headers) ]]; then + echo "compliance.openshift.io CRDs still exist..." 
+ sleep $w + i=$((i+1)) + continue + fi + echo "no compliance.openshift.io CRDs left in deployment" + exit 0 +done +echo "timed out waiting for compliance.openshift.io CRDs to clean up" +exit 1