fix: test case multi-namespace_support #312

Merged · 1 commit · Jul 31, 2023
41 changes: 31 additions & 10 deletions test/e2e/monitoring_stack_controller_test.go
@@ -22,6 +22,8 @@ import (

"k8s.io/apimachinery/pkg/util/wait"

"k8s.io/utils/pointer"

"github.com/google/go-cmp/cmp"

stack "github.com/rhobs/observability-operator/pkg/apis/monitoring/v1alpha1"
@@ -861,7 +863,7 @@ func namespaceSelectorTest(t *testing.T) {

promClient := framework.NewPrometheusClient("http://localhost:9090")
if pollErr := wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) {
- query := `prometheus_build_info{namespace=~"test-ns-.*"}`
+ query := `version{pod="prometheus-example-app",namespace=~"test-ns-.*"}`
result, err := promClient.Query(query)
if err != nil {
return false, nil
@@ -889,7 +891,7 @@ func deployDemoApp(t *testing.T, nsName string, nsLabels, resourceLabels map[str
}

// deploy a pod, service, service-monitor into that namespace
- prom := newPrometheusPod(t, "prometheus", ns.Name)
+ prom := newPrometheusExampleAppPod(t, "prometheus-example-app", ns.Name)
if err := f.K8sClient.Create(context.Background(), prom); err != nil {
return fmt.Errorf("failed to create demo app %s/%s: %w", nsName, prom.Name, err)
}
Expand All @@ -902,15 +904,15 @@ func deployDemoApp(t *testing.T, nsName string, nsLabels, resourceLabels map[str
// these are prometheus ports
svc.Spec.Ports = []corev1.ServicePort{{
Name: "metrics",
- Port: 9090,
- TargetPort: intstr.FromInt(9090),
+ Port: 8080,
+ TargetPort: intstr.FromInt(8080),
}}

if err := f.K8sClient.Create(context.Background(), svc); err != nil {
return fmt.Errorf("failed to create service for demo app %s/%s: %w", nsName, svc.Name, err)
}

- svcMon := newServiceMonitor(t, ns.Name, "prometheus", resourceLabels, svcLabels, "metrics")
+ svcMon := newServiceMonitor(t, ns.Name, "prometheus-example-app", resourceLabels, svcLabels, "metrics")
if err := f.K8sClient.Create(context.Background(), svcMon); err != nil {
return fmt.Errorf("failed to create servicemonitor for demo service %s/%s: %w", nsName, svcMon.Name, err)
}
@@ -972,7 +974,7 @@ func newService(t *testing.T, name, namespace string, labels, selector map[strin
return svc
}

- func newPrometheusPod(t *testing.T, name, ns string) *corev1.Pod {
+ func newPrometheusExampleAppPod(t *testing.T, name, ns string) *corev1.Pod {
pod := &corev1.Pod{
TypeMeta: metav1.TypeMeta{
APIVersion: corev1.SchemeGroupVersion.String(),
@@ -983,16 +985,35 @@ func newPrometheusPod(t *testing.T, name, ns string) *corev1.Pod {
Namespace: ns,
Labels: map[string]string{
"app.kubernetes.io/name": "prometheus",
"app.kubernetes.io/version": "2.39.1",
"app.kubernetes.io/version": "multiarch-v0.4.1",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Name: "prometheus",
Image: "quay.io/prometheus/prometheus:v2.39.1",
Name: "prometheus-example-app",
// This image is rebuild of the `prometheus-example-app` available on GitHub:
// https://github.com/brancz/prometheus-example-app

// The rebuild includes multi-arch support, as indicated by the link:
// https://quay.io/repository/openshifttest/prometheus-example-app/manifest/sha256:382dc349f82d730b834515e402b48a9c7e2965d0efbc42388bd254f424f6193e

// Additionally, this image is accessible on an OCP disconnected cluster,
// allowing tests to be run in that environment.
Image: "quay.io/openshifttest/prometheus-example-app@sha256:382dc349f82d730b834515e402b48a9c7e2965d0efbc42388bd254f424f6193e",

Collaborator:
I wonder if we should instead rely on what the upstream Prometheus Operator is using?

Contributor Author:
I would prefer to use this one. I run the upstream test cases on OCP with all kinds of profiles shared within the QE team during ObO releases, and this image is mirrored by default, so the test cases can pull it and run on disconnected or private OCP clusters. Otherwise, the cases would fail on private and disconnected OCP clusters.

Collaborator:
@lihongyan1 would you mind leaving a comment that describes this and a link to the upstream that is mirrored? Just for future us when someone asks why we don't use the upstream version :)

Contributor Author:
Added comments

+ SecurityContext: &corev1.SecurityContext{
+ AllowPrivilegeEscalation: pointer.Bool(false),
+ SeccompProfile: &corev1.SeccompProfile{
+ Type: "RuntimeDefault",
+ },
+ Capabilities: &corev1.Capabilities{
+ Drop: []corev1.Capability{
+ "ALL",
+ },
+ },
+ },
Ports: []corev1.ContainerPort{{
Name: "metrics",
- ContainerPort: 9090,
+ ContainerPort: 8080,
}},
}},
},
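
For context on what the updated test verifies: the example app serves a `version` metric on port 8080, the ServiceMonitor scrapes it from every `test-ns-*` namespace, and the test polls Prometheus until that metric becomes visible. The repository does this through its own framework.PrometheusClient helper together with wait.Poll, as shown in the diff above. The sketch below is an illustrative, self-contained equivalent only; it talks to the Prometheus HTTP API directly with plain net/http, and the helper name, endpoint handling, and JSON decoding here are assumptions for illustration, not the project's actual test code.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// queryResultCount returns the number of series matching a PromQL query,
// using the standard Prometheus HTTP API (/api/v1/query).
func queryResultCount(baseURL, query string) (int, error) {
	resp, err := http.Get(baseURL + "/api/v1/query?query=" + url.QueryEscape(query))
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()

	var body struct {
		Data struct {
			Result []json.RawMessage `json:"result"`
		} `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		return 0, err
	}
	return len(body.Data.Result), nil
}

func main() {
	// Same query as the updated test: the `version` metric exposed by the
	// prometheus-example-app pod, scraped from any namespace matching test-ns-*.
	query := `version{pod="prometheus-example-app",namespace=~"test-ns-.*"}`

	// Poll with the same interval/timeout the e2e test uses (5s / 5m).
	err := wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) {
		n, qErr := queryResultCount("http://localhost:9090", query)
		if qErr != nil {
			return false, nil // transient errors: keep polling
		}
		return n > 0, nil
	})
	fmt.Println("example-app metric visible:", err == nil)
}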