diff --git a/apis/v1alpha1/opentelemetrycollector_types.go b/apis/v1alpha1/opentelemetrycollector_types.go
index d16abab40f..dc2f86fce6 100644
--- a/apis/v1alpha1/opentelemetrycollector_types.go
+++ b/apis/v1alpha1/opentelemetrycollector_types.go
@@ -183,6 +183,16 @@ type OpenTelemetryTargetAllocatorPrometheusCR struct {
 	// Enabled indicates whether to use a PrometheusOperator custom resources as targets or not.
 	// +optional
 	Enabled bool `json:"enabled,omitempty"`
+	// PodMonitors to be selected for target discovery.
+	// This is a map of {key,value} pairs. Each {key,value} in the map is going to exactly match a label in a
+	// PodMonitor's meta labels. The requirements are ANDed.
+	// +optional
+	PodMonitorSelector map[string]string `json:"podMonitorSelector,omitempty"`
+	// ServiceMonitors to be selected for target discovery.
+	// This is a map of {key,value} pairs. Each {key,value} in the map is going to exactly match a label in a
+	// ServiceMonitor's meta labels. The requirements are ANDed.
+	// +optional
+	ServiceMonitorSelector map[string]string `json:"serviceMonitorSelector,omitempty"`
 }
 
 // ScaleSubresourceStatus defines the observed state of the OpenTelemetryCollector's
diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go
index 3b2613097c..1038386010 100644
--- a/apis/v1alpha1/zz_generated.deepcopy.go
+++ b/apis/v1alpha1/zz_generated.deepcopy.go
@@ -480,7 +480,7 @@ func (in *OpenTelemetryTargetAllocator) DeepCopyInto(out *OpenTelemetryTargetAll
 		*out = new(int32)
 		**out = **in
 	}
-	out.PrometheusCR = in.PrometheusCR
+	in.PrometheusCR.DeepCopyInto(&out.PrometheusCR)
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenTelemetryTargetAllocator.
@@ -496,6 +496,20 @@ func (in *OpenTelemetryTargetAllocator) DeepCopy() *OpenTelemetryTargetAllocator
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *OpenTelemetryTargetAllocatorPrometheusCR) DeepCopyInto(out *OpenTelemetryTargetAllocatorPrometheusCR) {
 	*out = *in
+	if in.PodMonitorSelector != nil {
+		in, out := &in.PodMonitorSelector, &out.PodMonitorSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.ServiceMonitorSelector != nil {
+		in, out := &in.ServiceMonitorSelector, &out.ServiceMonitorSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenTelemetryTargetAllocatorPrometheusCR.
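Reviewer note: the regenerated deepcopy above matters because the new fields are reference types. A minimal sketch of the intended semantics — the import path is assumed to be this repo's `apis/v1alpha1` package, and the `release` value is illustrative:

```go
package main

import (
	"fmt"

	"github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1"
)

func main() {
	// Both selectors are plain label maps; all entries must match (ANDed).
	cr := v1alpha1.OpenTelemetryTargetAllocatorPrometheusCR{
		Enabled:                true,
		PodMonitorSelector:     map[string]string{"release": "test"},
		ServiceMonitorSelector: map[string]string{"release": "test"},
	}

	// With the regenerated DeepCopyInto, the maps are cloned rather than
	// shared, so mutating the copy no longer leaks into the original.
	cp := cr.DeepCopy()
	cp.PodMonitorSelector["release"] = "prod"
	fmt.Println(cr.PodMonitorSelector["release"]) // still "test"
}
```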
diff --git a/bundle/manifests/opentelemetry.io_opentelemetrycollectors.yaml b/bundle/manifests/opentelemetry.io_opentelemetrycollectors.yaml
index fcdfed60de..1e95f54bd7 100644
--- a/bundle/manifests/opentelemetry.io_opentelemetrycollectors.yaml
+++ b/bundle/manifests/opentelemetry.io_opentelemetrycollectors.yaml
@@ -1722,6 +1722,22 @@ spec:
                     description: Enabled indicates whether to use a PrometheusOperator
                       custom resources as targets or not.
                     type: boolean
+                  podMonitorSelector:
+                    additionalProperties:
+                      type: string
+                    description: PodMonitors to be selected for target discovery.
+                      This is a map of {key,value} pairs. Each {key,value} in
+                      the map is going to exactly match a label in a PodMonitor's
+                      meta labels. The requirements are ANDed.
+                    type: object
+                  serviceMonitorSelector:
+                    additionalProperties:
+                      type: string
+                    description: ServiceMonitors to be selected for target discovery.
+                      This is a map of {key,value} pairs. Each {key,value} in
+                      the map is going to exactly match a label in a ServiceMonitor's
+                      meta labels. The requirements are ANDed.
+                    type: object
                 type: object
               replicas:
                 description: Replicas is the number of pod instances for the underlying
diff --git a/cmd/otel-allocator/config/config.go b/cmd/otel-allocator/config/config.go
index af26544f0c..ed16eddce6 100644
--- a/cmd/otel-allocator/config/config.go
+++ b/cmd/otel-allocator/config/config.go
@@ -39,10 +39,12 @@ const DefaultResyncTime = 5 * time.Minute
 const DefaultConfigFilePath string = "/conf/targetallocator.yaml"
 
 type Config struct {
-	LabelSelector      map[string]string  `yaml:"label_selector,omitempty"`
-	Config             *promconfig.Config `yaml:"config"`
-	AllocationStrategy *string            `yaml:"allocation_strategy,omitempty"`
-	FilterStrategy     *string            `yaml:"filter_strategy,omitempty"`
+	LabelSelector          map[string]string  `yaml:"label_selector,omitempty"`
+	Config                 *promconfig.Config `yaml:"config"`
+	AllocationStrategy     *string            `yaml:"allocation_strategy,omitempty"`
+	FilterStrategy         *string            `yaml:"filter_strategy,omitempty"`
+	PodMonitorSelector     map[string]string  `yaml:"pod_monitor_selector,omitempty"`
+	ServiceMonitorSelector map[string]string  `yaml:"service_monitor_selector,omitempty"`
 }
 
 func (c Config) GetAllocationStrategy() string {
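For clarity, the new keys unmarshal like any other yaml.v2 map field. A self-contained sketch — the `selectors` struct below is a local stand-in mirroring only the two new tags, not the operator's actual `Config`:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// selectors mirrors only the two new Config fields and their yaml tags.
type selectors struct {
	PodMonitorSelector     map[string]string `yaml:"pod_monitor_selector,omitempty"`
	ServiceMonitorSelector map[string]string `yaml:"service_monitor_selector,omitempty"`
}

const doc = `
pod_monitor_selector:
  release: test
service_monitor_selector:
  release: test
`

func main() {
	var s selectors
	if err := yaml.Unmarshal([]byte(doc), &s); err != nil {
		panic(err)
	}
	fmt.Println(s.PodMonitorSelector, s.ServiceMonitorSelector)
	// map[release:test] map[release:test]
}
```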
Source: "0", + }, + }, + }, + }, + }, + }, + }, + wantErr: assert.NoError, + }, { - Targets: []model.LabelSet{ - {model.AddressLabel: "prom.domain:9001"}, - {model.AddressLabel: "prom.domain:9002"}, - {model.AddressLabel: "prom.domain:9003"}, + name: "service monitor pod monitor selector", + args: args{ + file: "./testdata/pod_service_selector_test.yaml", }, - Labels: model.LabelSet{ - "my": "label", + want: Config{ + LabelSelector: map[string]string{ + "app.kubernetes.io/instance": "default.test", + "app.kubernetes.io/managed-by": "opentelemetry-operator", + }, + Config: &promconfig.Config{ + GlobalConfig: promconfig.GlobalConfig{ + ScrapeInterval: model.Duration(60 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + EvaluationInterval: model.Duration(60 * time.Second), + }, + ScrapeConfigs: []*promconfig.ScrapeConfig{ + { + JobName: "prometheus", + HonorTimestamps: true, + ScrapeInterval: model.Duration(60 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + MetricsPath: "/metrics", + Scheme: "http", + HTTPClientConfig: commonconfig.HTTPClientConfig{ + FollowRedirects: true, + }, + ServiceDiscoveryConfigs: []discovery.Config{ + discovery.StaticConfig{ + { + Targets: []model.LabelSet{ + {model.AddressLabel: "prom.domain:9001"}, + {model.AddressLabel: "prom.domain:9002"}, + {model.AddressLabel: "prom.domain:9003"}, + }, + Labels: model.LabelSet{ + "my": "label", + }, + Source: "0", + }, + }, + }, + }, + }, + }, + PodMonitorSelector: map[string]string{ + "release": "test", + }, + ServiceMonitorSelector: map[string]string{ + "release": "test", + }, }, - Source: "0", + wantErr: assert.NoError, }, } - - cfg := Config{} - err := unmarshal(&cfg, testFile) - assert.NoError(t, err) - - scrapeConfig := *cfg.Config.ScrapeConfigs[0] - actualFileSDConfig := scrapeConfig.ServiceDiscoveryConfigs[0] - actulaStaticSDConfig := scrapeConfig.ServiceDiscoveryConfigs[1] - t.Log(actulaStaticSDConfig) - - assert.Equal(t, cfg.LabelSelector["app.kubernetes.io/instance"], "default.test") - assert.Equal(t, cfg.LabelSelector["app.kubernetes.io/managed-by"], "opentelemetry-operator") - assert.Equal(t, scrapeConfig.JobName, "prometheus") - assert.Equal(t, expectedFileSDConfig, actualFileSDConfig) - assert.Equal(t, expectedStaticSDConfig, actulaStaticSDConfig) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := Load(tt.args.file) + if !tt.wantErr(t, err, fmt.Sprintf("Load(%v)", tt.args.file)) { + return + } + assert.Equalf(t, tt.want, got, "Load(%v)", tt.args.file) + }) + } } diff --git a/cmd/otel-allocator/config/testdata/pod_service_selector_test.yaml b/cmd/otel-allocator/config/testdata/pod_service_selector_test.yaml new file mode 100644 index 0000000000..c0ff54ad36 --- /dev/null +++ b/cmd/otel-allocator/config/testdata/pod_service_selector_test.yaml @@ -0,0 +1,14 @@ +label_selector: + app.kubernetes.io/instance: default.test + app.kubernetes.io/managed-by: opentelemetry-operator +pod_monitor_selector: + release: test +service_monitor_selector: + release: test +config: + scrape_configs: + - job_name: prometheus + static_configs: + - targets: ["prom.domain:9001", "prom.domain:9002", "prom.domain:9003"] + labels: + my: label \ No newline at end of file diff --git a/cmd/otel-allocator/main.go b/cmd/otel-allocator/main.go index 9dd52a1054..07d1373f17 100644 --- a/cmd/otel-allocator/main.go +++ b/cmd/otel-allocator/main.go @@ -83,7 +83,7 @@ func main() { os.Exit(1) } - watcher, err := allocatorWatcher.NewWatcher(setupLog, cliConf, allocator) + watcher, err := 
diff --git a/cmd/otel-allocator/main.go b/cmd/otel-allocator/main.go
index 9dd52a1054..07d1373f17 100644
--- a/cmd/otel-allocator/main.go
+++ b/cmd/otel-allocator/main.go
@@ -83,7 +83,7 @@ func main() {
 		os.Exit(1)
 	}
 
-	watcher, err := allocatorWatcher.NewWatcher(setupLog, cliConf, allocator)
+	watcher, err := allocatorWatcher.NewWatcher(setupLog, cfg, cliConf, allocator)
 	if err != nil {
 		setupLog.Error(err, "Can't start the watchers")
 		os.Exit(1)
diff --git a/cmd/otel-allocator/watcher/main.go b/cmd/otel-allocator/watcher/main.go
index 8dbd71036f..d983c40f74 100644
--- a/cmd/otel-allocator/watcher/main.go
+++ b/cmd/otel-allocator/watcher/main.go
@@ -59,21 +59,21 @@ func (e EventSource) String() string {
 	return eventSourceToString[e]
 }
 
-func NewWatcher(logger logr.Logger, config config.CLIConfig, allocator allocation.Allocator) (*Manager, error) {
+func NewWatcher(logger logr.Logger, cfg config.Config, cliConfig config.CLIConfig, allocator allocation.Allocator) (*Manager, error) {
 	watcher := Manager{
 		allocator: allocator,
 		Events:    make(chan Event),
 		Errors:    make(chan error),
 	}
 
-	fileWatcher, err := newConfigMapWatcher(logger, config)
+	fileWatcher, err := newConfigMapWatcher(logger, cliConfig)
 	if err != nil {
 		return nil, err
 	}
 	watcher.watchers = append(watcher.watchers, &fileWatcher)
 
-	if *config.PromCRWatcherConf.Enabled {
-		promWatcher, err := newCRDMonitorWatcher(config)
+	if *cliConfig.PromCRWatcherConf.Enabled {
+		promWatcher, err := newCRDMonitorWatcher(cfg, cliConfig)
 		if err != nil {
 			return nil, err
 		}
diff --git a/cmd/otel-allocator/watcher/promOperator.go b/cmd/otel-allocator/watcher/promOperator.go
index 09eee6f50f..2b33b633bc 100644
--- a/cmd/otel-allocator/watcher/promOperator.go
+++ b/cmd/otel-allocator/watcher/promOperator.go
@@ -33,8 +33,8 @@ import (
 	"k8s.io/client-go/tools/cache"
 )
 
-func newCRDMonitorWatcher(config allocatorconfig.CLIConfig) (*PrometheusCRWatcher, error) {
-	mClient, err := monitoringclient.NewForConfig(config.ClusterConfig)
+func newCRDMonitorWatcher(cfg allocatorconfig.Config, cliConfig allocatorconfig.CLIConfig) (*PrometheusCRWatcher, error) {
+	mClient, err := monitoringclient.NewForConfig(cliConfig.ClusterConfig)
 	if err != nil {
 		return nil, err
 	}
@@ -61,11 +61,17 @@ func newCRDMonitorWatcher(config allocatorconfig.CLIConfig) (*PrometheusCRWatche
 		return nil, err
 	}
 
+	servMonSelector := getSelector(cfg.ServiceMonitorSelector)
+
+	podMonSelector := getSelector(cfg.PodMonitorSelector)
+
 	return &PrometheusCRWatcher{
-		kubeMonitoringClient: mClient,
-		informers:            monitoringInformers,
-		stopChannel:          make(chan struct{}),
-		configGenerator:      generator,
+		kubeMonitoringClient:   mClient,
+		informers:              monitoringInformers,
+		stopChannel:            make(chan struct{}),
+		configGenerator:        generator,
+		serviceMonitorSelector: servMonSelector,
+		podMonitorSelector:     podMonSelector,
 	}, nil
 }
 
@@ -74,6 +80,17 @@ type PrometheusCRWatcher struct {
 	informers            map[string]*informers.ForResource
 	stopChannel          chan struct{}
 	configGenerator      *prometheus.ConfigGenerator
+
+	serviceMonitorSelector labels.Selector
+	podMonitorSelector     labels.Selector
+}
+
+func getSelector(s map[string]string) labels.Selector {
+	sel := labels.NewSelector()
+	if s == nil {
+		return sel
+	}
+	return labels.SelectorFromSet(s)
 }
 
 // Start wrapped informers and wait for an initial sync.
@@ -118,7 +135,8 @@ func (w *PrometheusCRWatcher) Close() error {
 
 func (w *PrometheusCRWatcher) CreatePromConfig(kubeConfigPath string) (*promconfig.Config, error) {
 	serviceMonitorInstances := make(map[string]*monitoringv1.ServiceMonitor)
-	smRetrieveErr := w.informers[monitoringv1.ServiceMonitorName].ListAll(labels.NewSelector(), func(sm interface{}) {
+
+	smRetrieveErr := w.informers[monitoringv1.ServiceMonitorName].ListAll(w.serviceMonitorSelector, func(sm interface{}) {
 		monitor := sm.(*monitoringv1.ServiceMonitor)
 		key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(monitor)
 		serviceMonitorInstances[key] = monitor
@@ -128,7 +146,7 @@ func (w *PrometheusCRWatcher) CreatePromConfig(kubeConfigPath string) (*promconf
 	}
 
 	podMonitorInstances := make(map[string]*monitoringv1.PodMonitor)
-	pmRetrieveErr := w.informers[monitoringv1.PodMonitorName].ListAll(labels.NewSelector(), func(pm interface{}) {
+	pmRetrieveErr := w.informers[monitoringv1.PodMonitorName].ListAll(w.podMonitorSelector, func(pm interface{}) {
 		monitor := pm.(*monitoringv1.PodMonitor)
 		key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(monitor)
 		podMonitorInstances[key] = monitor
diff --git a/config/crd/bases/opentelemetry.io_opentelemetrycollectors.yaml b/config/crd/bases/opentelemetry.io_opentelemetrycollectors.yaml
index 1196232386..f04031ee7f 100644
--- a/config/crd/bases/opentelemetry.io_opentelemetrycollectors.yaml
+++ b/config/crd/bases/opentelemetry.io_opentelemetrycollectors.yaml
@@ -1720,6 +1720,22 @@ spec:
                     description: Enabled indicates whether to use a PrometheusOperator
                       custom resources as targets or not.
                     type: boolean
+                  podMonitorSelector:
+                    additionalProperties:
+                      type: string
+                    description: PodMonitors to be selected for target discovery.
+                      This is a map of {key,value} pairs. Each {key,value} in
+                      the map is going to exactly match a label in a PodMonitor's
+                      meta labels. The requirements are ANDed.
+                    type: object
+                  serviceMonitorSelector:
+                    additionalProperties:
+                      type: string
+                    description: ServiceMonitors to be selected for target discovery.
+                      This is a map of {key,value} pairs. Each {key,value} in
+                      the map is going to exactly match a label in a ServiceMonitor's
+                      meta labels. The requirements are ANDed.
+                    type: object
                 type: object
               replicas:
                 description: Replicas is the number of pod instances for the underlying
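The selector semantics above hinge on two `k8s.io/apimachinery` behaviors: `labels.NewSelector()` matches everything (preserving the old list-all default when no selector is configured), and `labels.SelectorFromSet` ANDs the map entries. A runnable sketch of the `getSelector` helper added in promOperator.go:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

// Same shape as the getSelector helper in promOperator.go.
func getSelector(s map[string]string) labels.Selector {
	sel := labels.NewSelector()
	if s == nil {
		return sel // empty selector: matches every object
	}
	return labels.SelectorFromSet(s)
}

func main() {
	monitor := labels.Set{"release": "test", "team": "o11y"}

	// Entries are ANDed: every key/value must be present on the monitor.
	fmt.Println(getSelector(map[string]string{"release": "test"}).Matches(monitor))                // true
	fmt.Println(getSelector(map[string]string{"release": "test", "team": "web"}).Matches(monitor)) // false
	fmt.Println(getSelector(nil).Matches(monitor))                                                 // true (match all)
}
```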
diff --git a/docs/api.md b/docs/api.md
index 015c87490a..832176e986 100644
--- a/docs/api.md
+++ b/docs/api.md
@@ -4606,6 +4606,20 @@ PrometheusCR defines the configuration for the retrieval of PrometheusOperator C
           Enabled indicates whether to use a PrometheusOperator custom resources as targets or not.<br/>
         </td>
         <td>false</td>
+      </tr><tr>
+        <td><b>podMonitorSelector</b></td>
+        <td>map[string]string</td>
+        <td>
+          PodMonitors to be selected for target discovery. This is a map of {key,value} pairs. Each {key,value} in the map is going to exactly match a label in a PodMonitor's meta labels. The requirements are ANDed.<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>serviceMonitorSelector</b></td>
+        <td>map[string]string</td>
+        <td>
+          ServiceMonitors to be selected for target discovery. This is a map of {key,value} pairs. Each {key,value} in the map is going to exactly match a label in a ServiceMonitor's meta labels. The requirements are ANDed.<br/>
+        </td>
+        <td>false</td>
       </tr>
 </table>
diff --git a/pkg/collector/reconcile/configmap.go b/pkg/collector/reconcile/configmap.go
index dce74a4f2c..801fa7ba63 100644
--- a/pkg/collector/reconcile/configmap.go
+++ b/pkg/collector/reconcile/configmap.go
@@ -124,6 +124,14 @@ func desiredTAConfigMap(params Params) (corev1.ConfigMap, error) {
 		taConfig["filter_strategy"] = params.Instance.Spec.TargetAllocator.FilterStrategy
 	}
 
+	if params.Instance.Spec.TargetAllocator.PrometheusCR.ServiceMonitorSelector != nil {
+		taConfig["service_monitor_selector"] = &params.Instance.Spec.TargetAllocator.PrometheusCR.ServiceMonitorSelector
+	}
+
+	if params.Instance.Spec.TargetAllocator.PrometheusCR.PodMonitorSelector != nil {
+		taConfig["pod_monitor_selector"] = &params.Instance.Spec.TargetAllocator.PrometheusCR.PodMonitorSelector
+	}
+
 	taConfigYAML, err := yaml.Marshal(taConfig)
 	if err != nil {
 		return corev1.ConfigMap{}, err
diff --git a/pkg/collector/reconcile/configmap_test.go b/pkg/collector/reconcile/configmap_test.go
index bdc13d79ca..22bef856a4 100644
--- a/pkg/collector/reconcile/configmap_test.go
+++ b/pkg/collector/reconcile/configmap_test.go
@@ -209,6 +209,45 @@
 		assert.Equal(t, expectedData, actual.Data)
 	})
 
+	t.Run("should return expected target allocator config map with label selectors", func(t *testing.T) {
+		expectedLables["app.kubernetes.io/component"] = "opentelemetry-targetallocator"
+		expectedLables["app.kubernetes.io/name"] = "test-targetallocator"
+
+		expectedData := map[string]string{
+			"targetallocator.yaml": `allocation_strategy: least-weighted
+config:
+  scrape_configs:
+  - job_name: otel-collector
+    scrape_interval: 10s
+    static_configs:
+    - targets:
+      - 0.0.0.0:8888
+      - 0.0.0.0:9999
+label_selector:
+  app.kubernetes.io/component: opentelemetry-collector
+  app.kubernetes.io/instance: default.test
+  app.kubernetes.io/managed-by: opentelemetry-operator
+pod_monitor_selector:
+  release: test
+service_monitor_selector:
+  release: test
+`,
+		}
+		p := params()
+		p.Instance.Spec.TargetAllocator.PrometheusCR.PodMonitorSelector = map[string]string{
+			"release": "test",
+		}
+		p.Instance.Spec.TargetAllocator.PrometheusCR.ServiceMonitorSelector = map[string]string{
+			"release": "test",
+		}
+		actual, err := desiredTAConfigMap(p)
+		assert.NoError(t, err)
+
+		assert.Equal(t, "test-targetallocator", actual.Name)
+		assert.Equal(t, expectedLables, actual.Labels)
+		assert.Equal(t, expectedData, actual.Data)
+
+	})
 }
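To see why the expected fixture above comes out alphabetized: `desiredTAConfigMap` assembles a plain map and hands it to yaml.v2, which writes map keys in sorted order. A rough model of that assembly — the literal values are simplified stand-ins, not the reconcile code's actual inputs, which come from the CR spec:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	taConfig := map[string]interface{}{
		"allocation_strategy": "least-weighted",
		"label_selector": map[string]string{
			"app.kubernetes.io/managed-by": "opentelemetry-operator",
		},
	}

	// Mirrors the nil guard in desiredTAConfigMap: the selector keys are only
	// written when the corresponding CR fields are set, keeping the ConfigMap minimal.
	podMonitorSelector := map[string]string{"release": "test"}
	if podMonitorSelector != nil {
		taConfig["pod_monitor_selector"] = podMonitorSelector
	}

	out, err := yaml.Marshal(taConfig)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // keys emitted in sorted order, matching the test fixture
}
```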