From 5717073b082c14d125884f12db947d68f9cda543 Mon Sep 17 00:00:00 2001
From: Alina Militaru <41362174+asincu@users.noreply.github.com>
Date: Mon, 6 May 2024 14:05:28 -0700
Subject: [PATCH] Install Kibana per tenant

---
 api/v1/kibana_types.go                         |   2 +-
 .../dashboards/dashboards_controller.go        |  15 +-
 .../logstorage/elastic/elastic_controller.go   |  14 +-
 .../elastic/external_elastic_controller.go     | 328 +++++++++++++++++-
 .../linseed/linseed_controller_test.go         |   1 +
 pkg/controller/manager/manager_controller.go   |  12 +-
 pkg/render/logstorage/eck/eck.go               |   7 +-
 pkg/render/logstorage/kibana/kibana.go         | 165 ++++++---
 pkg/render/logstorage/kibana/kibana_test.go    |   1 +
 pkg/render/logstorage/linseed/linseed.go       |  12 +-
 pkg/render/manager.go                          |   2 +
 11 files changed, 474 insertions(+), 85 deletions(-)

diff --git a/api/v1/kibana_types.go b/api/v1/kibana_types.go
index 5c844d34b9..6809445f31 100644
--- a/api/v1/kibana_types.go
+++ b/api/v1/kibana_types.go
@@ -60,7 +60,7 @@ type KibanaPodSpec struct {
 type KibanaContainer struct {
 	// Name is an enum which identifies the Kibana Deployment container by name.
-	// Supported values are: kibana
-	// +kubebuilder:validation:Enum=kibana
+	// Supported values are: kibana, challenger
+	// +kubebuilder:validation:Enum=kibana;challenger
 	Name string `json:"name"`

 	// Resources allows customization of limits and requests for compute resources such as cpu and memory.
diff --git a/pkg/controller/logstorage/dashboards/dashboards_controller.go b/pkg/controller/logstorage/dashboards/dashboards_controller.go
index b6ab6f86e8..f02c2a7849 100644
--- a/pkg/controller/logstorage/dashboards/dashboards_controller.go
+++ b/pkg/controller/logstorage/dashboards/dashboards_controller.go
@@ -113,7 +113,7 @@ func Add(mgr manager.Manager, opts options.AddOptions) error {
 		return fmt.Errorf("logstorage-dashboards-controller failed to watch logstorage Tigerastatus: %w", err)
 	}
 	if opts.MultiTenant {
-		if err = c.WatchObject(&operatorv1.Tenant{}, &handler.EnqueueRequestForObject{}); err != nil {
+		if err = c.WatchObject(&operatorv1.Tenant{}, eventHandler); err != nil {
 			return fmt.Errorf("log-storage-dashboards-controller failed to watch Tenant resource: %w", err)
 		}
 	}
@@ -122,6 +122,7 @@ func Add(mgr manager.Manager, opts options.AddOptions) error {
 	// For single-tenant, everything is installed in the tigera-elasticsearch namespace.
 	// Make a helper for determining which namespaces to use based on tenancy mode.
 	helper := utils.NewNamespaceHelper(opts.MultiTenant, render.ElasticsearchNamespace, "")
+	kibanaHelper := utils.NewNamespaceHelper(opts.MultiTenant, kibana.Namespace, "")

 	// Watch secrets this controller cares about.
 	secretsToWatch := []string{
@@ -142,10 +143,10 @@ func Add(mgr manager.Manager, opts options.AddOptions) error {
 	}

 	// Catch if something modifies the resources that this controller consumes.
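A note on the helper pair created above: the NamespaceHelper contract is inferred here from its usage in this patch (the helper lives in pkg/controller/utils, which this patch does not show). Passing an empty request namespace, as Add does, leaves the install namespace empty in multi-tenant mode, which makes the watches below cluster-wide:

	// Assumed behavior, shown for orientation only.
	helper := utils.NewNamespaceHelper(multiTenant, kibana.Namespace, requestNamespace)
	helper.InstallNamespace() // multi-tenant: requestNamespace; otherwise "tigera-kibana"
	helper.TruthNamespace()   // multi-tenant: requestNamespace; otherwise the operator's own namespace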
- if err := utils.AddServiceWatch(c, kibana.ServiceName, helper.InstallNamespace()); err != nil { + if err := utils.AddServiceWatch(c, kibana.ServiceName, kibanaHelper.InstallNamespace()); err != nil { return fmt.Errorf("log-storage-dashboards-controller failed to watch the Service resource: %w", err) } - if err := utils.AddConfigMapWatch(c, certificatemanagement.TrustedCertConfigMapName, helper.InstallNamespace(), &handler.EnqueueRequestForObject{}); err != nil { + if err := utils.AddConfigMapWatch(c, certificatemanagement.TrustedCertConfigMapName, helper.InstallNamespace(), eventHandler); err != nil { return fmt.Errorf("log-storage-dashboards-controller failed to watch the Service resource: %w", err) } @@ -267,6 +268,7 @@ func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcil var externalKibanaSecret *corev1.Secret if !d.elasticExternal { + // This is the configuration for zero tenant or single tenant with internal elastic // Wait for Elasticsearch to be installed and available. elasticsearch, err := utils.GetElasticsearch(ctx, d.client) if err != nil { @@ -277,7 +279,8 @@ func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcil d.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for Elasticsearch cluster to be operational", nil, reqLogger) return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil } - } else { + } else if !d.multiTenant { + // This is the configuration for single tenant with external elastic // If we're using an external ES and Kibana, the Tenant resource must specify the Kibana endpoint. if tenant == nil || tenant.Spec.Elastic == nil || tenant.Spec.Elastic.KibanaURL == "" { reqLogger.Error(nil, "Kibana URL must be specified for this tenant") @@ -311,6 +314,10 @@ func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcil return reconcile.Result{}, err } } + } else { + // This is the configuration for multi-tenant + // We connect to a kibana service deployed in the tenant namespace + kibanaHost = fmt.Sprintf("tigera-secure-kb-http.%s.svc", helper.InstallNamespace()) } // Query the username and password this Dashboards Installer instance should use to authenticate with Elasticsearch. diff --git a/pkg/controller/logstorage/elastic/elastic_controller.go b/pkg/controller/logstorage/elastic/elastic_controller.go index ada5a2e0a0..341f609623 100644 --- a/pkg/controller/logstorage/elastic/elastic_controller.go +++ b/pkg/controller/logstorage/elastic/elastic_controller.go @@ -22,6 +22,7 @@ import ( cmnv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" kbv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" "github.com/elastic/cloud-on-k8s/v2/pkg/utils/stringsutil" "github.com/go-logr/logr" apps "k8s.io/api/apps/v1" @@ -453,7 +454,7 @@ func (r *ElasticSubController) Reconcile(ctx context.Context, request reconcile. var kibanaCR *kbv1.Kibana if kibanaEnabled { - kibanaCR, err = r.getKibana(ctx) + kibanaCR, err = getKibana(ctx, r.client, kibana.Namespace) if err != nil { r.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred trying to retrieve Kibana", err, reqLogger) return reconcile.Result{}, err @@ -504,7 +505,7 @@ func (r *ElasticSubController) Reconcile(ctx context.Context, request reconcile. var kbService *corev1.Service if kibanaEnabled { // For now, Kibana is only supported in single tenant configurations. 
- kbService, err = r.getKibanaService(ctx) + kbService, err = getKibanaService(ctx, r.client, kibana.Namespace) if err != nil { r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to retrieve the Kibana service", err, reqLogger) return reconcile.Result{}, err @@ -563,6 +564,7 @@ func (r *ElasticSubController) Reconcile(ctx context.Context, request reconcile. UnusedTLSSecret: unusedTLSSecret, UsePSP: r.usePSP, Enabled: kibanaEnabled, + Namespace: kibana.Namespace, }), } @@ -708,9 +710,9 @@ func (r *ElasticSubController) getElasticsearchService(ctx context.Context) (*co return &svc, nil } -func (r *ElasticSubController) getKibana(ctx context.Context) (*kbv1.Kibana, error) { +func getKibana(ctx context.Context, cli k8s.Client, namespace string) (*kbv1.Kibana, error) { kb := kbv1.Kibana{} - err := r.client.Get(ctx, client.ObjectKey{Name: kibana.CRName, Namespace: kibana.Namespace}, &kb) + err := cli.Get(ctx, client.ObjectKey{Name: kibana.CRName, Namespace: namespace}, &kb) if err != nil { if errors.IsNotFound(err) { return nil, nil @@ -720,9 +722,9 @@ func (r *ElasticSubController) getKibana(ctx context.Context) (*kbv1.Kibana, err return &kb, nil } -func (r *ElasticSubController) getKibanaService(ctx context.Context) (*corev1.Service, error) { +func getKibanaService(ctx context.Context, cli k8s.Client, namespace string) (*corev1.Service, error) { svc := corev1.Service{} - err := r.client.Get(ctx, client.ObjectKey{Name: kibana.ServiceName, Namespace: kibana.Namespace}, &svc) + err := cli.Get(ctx, client.ObjectKey{Name: kibana.ServiceName, Namespace: namespace}, &svc) if err != nil { if errors.IsNotFound(err) { return nil, nil diff --git a/pkg/controller/logstorage/elastic/external_elastic_controller.go b/pkg/controller/logstorage/elastic/external_elastic_controller.go index c0006afb0f..a6bcb2b2ba 100644 --- a/pkg/controller/logstorage/elastic/external_elastic_controller.go +++ b/pkg/controller/logstorage/elastic/external_elastic_controller.go @@ -17,6 +17,22 @@ package elastic import ( "context" "fmt" + "net/url" + + kbv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1" + v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" + "github.com/tigera/operator/pkg/controller/certificatemanager" + "github.com/tigera/operator/pkg/dns" + "github.com/tigera/operator/pkg/render/common/networkpolicy" + "github.com/tigera/operator/pkg/render/logstorage" + "github.com/tigera/operator/pkg/render/logstorage/eck" + "github.com/tigera/operator/pkg/render/logstorage/kibana" + "github.com/tigera/operator/pkg/tls/certificatemanagement" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" "github.com/tigera/operator/pkg/controller/logstorage/initializer" @@ -43,12 +59,14 @@ import ( type ExternalESController struct { // This client, initialized using mgr.Client() above, is a split client // that reads objects from the cache and writes to the apiserver - client client.Client - scheme *runtime.Scheme - status status.StatusManager - provider operatorv1.Provider - clusterDomain string - usePSP bool + client client.Client + scheme *runtime.Scheme + status status.StatusManager + provider operatorv1.Provider + tierWatchReady *utils.ReadyFlag + clusterDomain string + usePSP bool + multiTenant bool } func AddExternalES(mgr manager.Manager, opts options.AddOptions) error { @@ -67,17 +85,25 @@ func AddExternalES(mgr manager.Manager, opts options.AddOptions) error { usePSP: opts.UsePSP, 
 		clusterDomain:  opts.ClusterDomain,
 		provider:       opts.DetectedProvider,
+		multiTenant:    opts.MultiTenant,
+		tierWatchReady: &utils.ReadyFlag{},
 	}
 	r.status.Run(opts.ShutdownContext)

 	// Create a controller using the reconciler and register it with the manager to receive reconcile calls.
 	c, err := ctrlruntime.NewController("log-storage-external-es-controller", mgr, controller.Options{Reconciler: r})
 	if err != nil {
 		return err
 	}

+	// Determine how to handle watch events for cluster-scoped resources. For multi-tenant clusters,
+	// we should update all tenants whenever one changes. For single-tenant clusters, we can just queue the object.
+	var eventHandler handler.EventHandler = &handler.EnqueueRequestForObject{}
+	if opts.MultiTenant {
+		eventHandler = utils.EnqueueAllTenants(mgr.GetClient())
+	}
+
 	// Configure watches for operator.tigera.io APIs this controller cares about.
-	if err = c.WatchObject(&operatorv1.LogStorage{}, &handler.EnqueueRequestForObject{}); err != nil {
+	if err = c.WatchObject(&operatorv1.LogStorage{}, eventHandler); err != nil {
 		return fmt.Errorf("log-storage-external-es-controller failed to watch LogStorage resource: %w", err)
 	}
 	if err = utils.AddInstallationWatch(c); err != nil {
@@ -86,28 +112,108 @@
 	if err = imageset.AddImageSetWatch(c); err != nil {
 		return fmt.Errorf("log-storage-external-es-controller failed to watch ImageSet: %w", err)
 	}
-	if err = c.WatchObject(&operatorv1.ManagementCluster{}, &handler.EnqueueRequestForObject{}); err != nil {
+	if err = c.WatchObject(&operatorv1.ManagementCluster{}, eventHandler); err != nil {
 		return fmt.Errorf("log-storage-external-es-controller failed to watch ManagementCluster resource: %w", err)
 	}
-	if err = c.WatchObject(&operatorv1.ManagementClusterConnection{}, &handler.EnqueueRequestForObject{}); err != nil {
+	if err = c.WatchObject(&operatorv1.ManagementClusterConnection{}, eventHandler); err != nil {
 		return fmt.Errorf("log-storage-external-es-controller failed to watch ManagementClusterConnection resource: %w", err)
 	}
 	if err = utils.AddTigeraStatusWatch(c, initializer.TigeraStatusLogStorageElastic); err != nil {
 		return fmt.Errorf("log-storage-external-es-controller failed to watch logstorage Tigerastatus: %w", err)
 	}
-	if err = utils.AddConfigMapWatch(c, "cloud-kibana-config", common.OperatorNamespace(), &handler.EnqueueRequestForObject{}); err != nil {
+	if err = utils.AddConfigMapWatch(c, "cloud-kibana-config", common.OperatorNamespace(), eventHandler); err != nil {
 		return fmt.Errorf("log-storage-external-es-controller failed to watch the ConfigMap resource: %w", err)
 	}
+
+	if opts.MultiTenant {
+		k8sClient, err := kubernetes.NewForConfig(mgr.GetConfig())
+		if err != nil {
+			return fmt.Errorf("log-storage-external-es-controller failed to establish a connection to k8s: %w", err)
+		}
+
+		// Establish a watch for any tenant-related changes.
+		if err = c.WatchObject(&operatorv1.Tenant{}, eventHandler); err != nil {
+			return fmt.Errorf("log-storage-external-es-controller failed to watch Tenant resource: %w", err)
+		}
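utils.EnqueueAllTenants itself is not shown in this patch. A minimal sketch of what such a fan-out handler plausibly looks like with controller-runtime (assumed API, hypothetical implementation): every event on a watched object enqueues one reconcile request per Tenant, so each tenant namespace gets re-rendered.

	func enqueueAllTenants(c client.Client) handler.EventHandler {
		return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []reconcile.Request {
			tenants := operatorv1.TenantList{}
			if err := c.List(ctx, &tenants); err != nil {
				return nil
			}
			var reqs []reconcile.Request
			for _, t := range tenants.Items {
				reqs = append(reqs, reconcile.Request{NamespacedName: types.NamespacedName{Name: t.Name, Namespace: t.Namespace}})
			}
			return reqs
		})
	}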
+		// Establish a watch on the tenant CA secret across all namespaces if multi-tenancy is enabled.
+		if err = utils.AddSecretsWatch(c, certificatemanagement.TenantCASecretName, ""); err != nil {
+			return fmt.Errorf("log-storage-external-es-controller failed to watch Secret resource: %w", err)
+		}
+
+		// The namespace(s) we need to monitor depend upon what tenancy mode we're running in.
+		// For single-tenant, Kibana is installed in the tigera-kibana namespace.
+		// Make a helper for determining which namespaces to use based on tenancy mode.
+		kibanaNamespaceHelper := utils.NewNamespaceHelper(opts.MultiTenant, kibana.Namespace, "")
+
+		// Start goroutines to establish watches against projectcalico.org/v3 resources.
+		go utils.WaitToAddTierWatch(networkpolicy.TigeraComponentTierName, c, k8sClient, log, r.tierWatchReady)
+		go utils.WaitToAddNetworkPolicyWatches(c, k8sClient, log, []types.NamespacedName{
+			{Name: kibana.PolicyName, Namespace: kibanaNamespaceHelper.InstallNamespace()},
+			{Name: eck.OperatorPolicyName, Namespace: eck.OperatorNamespace},
+			{Name: networkpolicy.TigeraComponentDefaultDenyPolicyName, Namespace: kibanaNamespaceHelper.InstallNamespace()},
+		})
+
+		if err = c.WatchObject(&apps.StatefulSet{
+			ObjectMeta: metav1.ObjectMeta{Namespace: eck.OperatorNamespace, Name: eck.OperatorName},
+		}, eventHandler); err != nil {
+			return fmt.Errorf("log-storage-external-es-controller failed to watch StatefulSet resource: %w", err)
+		}
+
+		if err = utils.AddConfigMapWatch(c, eck.LicenseConfigMapName, eck.OperatorNamespace, eventHandler); err != nil {
+			return fmt.Errorf("log-storage-external-es-controller failed to watch ConfigMap resource: %w", err)
+		}
+
+		if err = c.WatchObject(&kbv1.Kibana{
+			ObjectMeta: metav1.ObjectMeta{Namespace: kibanaNamespaceHelper.InstallNamespace(), Name: kibana.CRName},
+		}, eventHandler); err != nil {
+			return fmt.Errorf("log-storage-external-es-controller failed to watch Kibana resource: %w", err)
+		}
+
+		for _, secretName := range []string{
+			kibana.TigeraKibanaCertSecret,
+		} {
+			if err = utils.AddSecretsWatch(c, secretName, kibanaNamespaceHelper.TruthNamespace()); err != nil {
+				return fmt.Errorf("log-storage-external-es-controller failed to watch Secret resource: %w", err)
+			}
+		}
+		// TODO: ALINA - We need a user for kibana
+	}
+
+	// Perform periodic reconciliation. This acts as a backstop to catch reconcile issues,
+	// and also makes sure we spot when things change that might not trigger a reconciliation.
+	err = utils.AddPeriodicReconcile(c, utils.PeriodicReconcileTime, eventHandler)
+	if err != nil {
+		return fmt.Errorf("log-storage-external-es-controller failed to create periodic reconcile watch: %w", err)
+	}
+
 	return nil
 }

 func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+	kibanaHelper := utils.NewNamespaceHelper(r.multiTenant, kibana.Namespace, request.Namespace)
 	reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
 	reqLogger.Info("Reconciling LogStorage")

+	// We skip requests without a namespace specified in multi-tenant setups.
+	if r.multiTenant && request.Namespace == "" {
+		return reconcile.Result{}, nil
+	}
+
+	// When running in multi-tenant mode, we need to install Kibana in tenant Namespaces. However, the LogStorage
+	// resource is still cluster-scoped (since ES is a cluster-wide resource), so we use the Tenant API to determine
+	// which tenant namespaces require a Kibana instance.
+	tenant, _, err := utils.GetTenant(ctx, r.multiTenant, r.client, request.Namespace)
+	if errors.IsNotFound(err) {
+		reqLogger.Info("No Tenant in this Namespace, skip")
+		return reconcile.Result{}, nil
+	} else if err != nil {
+		r.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred while querying Tenant", err, reqLogger)
+		return reconcile.Result{}, err
+	}
+
 	ls := &operatorv1.LogStorage{}
-	err := r.client.Get(ctx, utils.DefaultTSEEInstanceKey, ls)
+	err = r.client.Get(ctx, utils.DefaultTSEEInstanceKey, ls)
 	if err != nil {
 		if !errors.IsNotFound(err) {
 			return reconcile.Result{}, err
@@ -117,7 +223,7 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile.
 	}
 	r.status.OnCRFound()

-	_, install, err := utils.GetInstallation(context.Background(), r.client)
+	variant, install, err := utils.GetInstallation(context.Background(), r.client)
 	if err != nil {
 		if errors.IsNotFound(err) {
 			r.status.SetDegraded(operatorv1.ResourceNotFound, "Installation not found", err, reqLogger)
@@ -132,22 +238,216 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile.
 		return reconcile.Result{}, err
 	}

+	managementCluster, err := utils.GetManagementCluster(ctx, r.client)
+	if err != nil {
+		r.status.SetDegraded(operatorv1.ResourceReadError, "Error reading ManagementCluster", err, reqLogger)
+		return reconcile.Result{}, err
+	}
+
+	managementClusterConnection, err := utils.GetManagementClusterConnection(ctx, r.client)
+	if err != nil {
+		r.status.SetDegraded(operatorv1.ResourceReadError, "Error reading ManagementClusterConnection", err, reqLogger)
+		return reconcile.Result{}, err
+	}
+	if managementClusterConnection != nil {
+		// LogStorage is not supported on a managed cluster.
+		r.status.SetDegraded(operatorv1.ResourceNotReady, "LogStorage is not supported on a managed cluster", nil, reqLogger)
+		return reconcile.Result{}, nil
+	}
+
 	pullSecrets, err := utils.GetNetworkingPullSecrets(install, r.client)
 	if err != nil {
 		r.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred while retrieving pull secrets", err, reqLogger)
 		return reconcile.Result{}, err
 	}

+	var multiTenantComponents []render.Component
+	if r.multiTenant {
+		// Ensure the allow-tigera tier exists before rendering any network policies within it.
+ if err := r.client.Get(ctx, client.ObjectKey{Name: networkpolicy.TigeraComponentTierName}, &v3.Tier{}); err != nil { + if errors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for allow-tigera tier to be created, see the 'tiers' TigeraStatus for more information", err, reqLogger) + return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil + } else { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying allow-tigera tier", err, reqLogger) + return reconcile.Result{}, err + } + } + + esLicenseType, err := utils.GetElasticLicenseType(ctx, r.client, reqLogger) + if err != nil { + // If LicenseConfigMapName is not found, it means ECK operator is not running yet, log the information and proceed + if errors.IsNotFound(err) { + reqLogger.Info("ConfigMap not found yet", "name", eck.LicenseConfigMapName) + } else { + r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to get elastic license", err, reqLogger) + return reconcile.Result{}, err + } + } + + // ECK will be deployed per management cluster and it will be configured + // to watch all namespaces in order to create a Kibana deployment + multiTenantComponents = append(multiTenantComponents, + eck.ECK(&eck.Configuration{ + LogStorage: ls, + Installation: install, + ManagementCluster: managementCluster, + PullSecrets: pullSecrets, + Provider: r.provider, + ElasticLicenseType: esLicenseType, + UsePSP: r.usePSP, + // TODO: Alina check if false is the correct value for multi-tenant + ApplyTrial: false, + Tenant: tenant, + }), + ) + + // TODO: Retrieve from tenant CR + var kibanaEnabled = true + if kibanaEnabled { + // Collect the certificates we need to provision Kibana. + // These will have been provisioned already by the ES secrets controller. + opts := []certificatemanager.Option{ + certificatemanager.WithLogger(reqLogger), + certificatemanager.WithTenant(tenant), + } + cm, err := certificatemanager.Create(r.client, install, r.clusterDomain, kibanaHelper.TruthNamespace(), opts...) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceCreateError, "Unable to create the Tigera CA", err, reqLogger) + return reconcile.Result{}, err + } + + // We want to retrieve Kibana certificate for all supported configurations + kbDNSNames := dns.GetServiceDNSNames(kibana.ServiceName, kibanaHelper.InstallNamespace(), r.clusterDomain) + kibanaKeyPair, err := cm.GetKeyPair(r.client, kibana.TigeraKibanaCertSecret, kibanaHelper.TruthNamespace(), kbDNSNames) + if err != nil { + log.Error(err, err.Error()) + r.status.SetDegraded(operatorv1.ResourceCreateError, "Failed to create Kibana secrets", err, reqLogger) + return reconcile.Result{}, err + } + + if kibanaKeyPair == nil { + r.status.SetDegraded(operatorv1.ResourceNotFound, "Waiting for kibana key pair to be available", err, reqLogger) + return reconcile.Result{}, nil + } + + kbService, err := getKibanaService(ctx, r.client, kibanaHelper.InstallNamespace()) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to retrieve the Kibana service", err, reqLogger) + return reconcile.Result{}, err + } + kibanaCR, err := getKibana(ctx, r.client, kibanaHelper.InstallNamespace()) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred trying to retrieve Kibana", err, reqLogger) + return reconcile.Result{}, err + } + + var unusedTLSSecret *corev1.Secret + if install.CertificateManagement != nil { + // Eck requires us to provide a TLS secret for Kibana. 
It will also inspect that it has a + // certificate and private key. However, when certificate management is enabled, we do not want to use a + // private key stored in a secret. For this reason, we mount a dummy that the actual Elasticsearch and Kibana + // pods are never using. + unusedTLSSecret, err = utils.GetSecret(ctx, r.client, relasticsearch.UnusedCertSecret, common.OperatorNamespace()) + if unusedTLSSecret == nil { + unusedTLSSecret, err = certificatemanagement.CreateSelfSignedSecret(relasticsearch.UnusedCertSecret, common.OperatorNamespace(), relasticsearch.UnusedCertSecret, []string{}) + unusedTLSSecret.Data[corev1.TLSCertKey] = install.CertificateManagement.CACert + } + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("Failed to retrieve secret %s/%s", common.OperatorNamespace(), relasticsearch.UnusedCertSecret), err, reqLogger) + return reconcile.Result{}, nil + } + } + + // Query the trusted bundle from the namespace. + trustedBundle, err := cm.LoadTrustedBundle(ctx, r.client, tenant.Namespace) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error getting trusted bundle", err, reqLogger) + return reconcile.Result{}, err + } + + // If we're using an external ES, the Tenant resource must specify the ES endpoint. + if tenant == nil || tenant.Spec.Elastic == nil || tenant.Spec.Elastic.URL == "" { + reqLogger.Error(nil, "Elasticsearch URL must be specified for this tenant") + r.status.SetDegraded(operatorv1.ResourceValidationError, "Elasticsearch URL must be specified for this tenant", nil, reqLogger) + return reconcile.Result{}, nil + } + + // Determine the host and port from the URL. + url, err := url.Parse(tenant.Spec.Elastic.URL) + if err != nil { + reqLogger.Error(err, "Elasticsearch URL is invalid") + r.status.SetDegraded(operatorv1.ResourceValidationError, "Elasticsearch URL is invalid", err, reqLogger) + return reconcile.Result{}, nil + } + + var esClientSecret *corev1.Secret + if tenant.ElasticMTLS() { + // If mTLS is enabled, get the secret containing the CA and client certificate. 
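The url.Parse call above is the only validation the tenant's Elasticsearch endpoint receives before it reaches the Kibana renderer; a small illustration with a hypothetical endpoint value:

	u, err := url.Parse("https://tenant-a-es.example.com:9200")
	if err != nil {
		// degrade TigeraStatus and stop reconciling, as above
	}
	endpoint := u.String() // passed to the renderer as ExternalElasticEndpoint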
+ esClientSecret = &corev1.Secret{} + err = r.client.Get(ctx, client.ObjectKey{Name: logstorage.ExternalCertsSecret, Namespace: common.OperatorNamespace()}, esClientSecret) + if err != nil { + reqLogger.Error(err, "Failed to read external Elasticsearch client certificate secret") + r.status.SetDegraded(operatorv1.ResourceReadError, "Waiting for external Elasticsearch client certificate secret to be available", err, reqLogger) + return reconcile.Result{}, err + } + } + + // TODO: Alina - Copy user to tenant namespace + // TODO: Alina Retrieve it from tenant CR + baseURL := "tigera-kibana" + multiTenantComponents = append(multiTenantComponents, + kibana.Kibana(&kibana.Configuration{ + LogStorage: ls, + Installation: install, + Kibana: kibanaCR, + KibanaKeyPair: kibanaKeyPair, + PullSecrets: pullSecrets, + Provider: r.provider, + KbService: kbService, + ClusterDomain: r.clusterDomain, + BaseURL: baseURL, + TrustedBundle: trustedBundle, + UnusedTLSSecret: unusedTLSSecret, + UsePSP: r.usePSP, + Enabled: kibanaEnabled, + Tenant: tenant, + Namespace: kibanaHelper.InstallNamespace(), + ElasticClientSecret: esClientSecret, + ExternalElasticEndpoint: url.String(), + }), + ) + } + } + flowShards := logstoragecommon.CalculateFlowShards(ls.Spec.Nodes, logstoragecommon.DefaultElasticsearchShards) clusterConfig := relasticsearch.NewClusterConfig(render.DefaultElasticsearchClusterName, ls.Replicas(), logstoragecommon.DefaultElasticsearchShards, flowShards) - hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls) + // In standard installs, the LogStorage owns the external elastic. For multi-tenant, it's owned by the Tenant instance. + var hdler utils.ComponentHandler + if r.multiTenant { + hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, tenant) + } else { + hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls) + } + externalElasticsearch := externalelasticsearch.ExternalElasticsearch(install, clusterConfig, pullSecrets) if err := hdler.CreateOrUpdateOrDelete(ctx, externalElasticsearch, r.status); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) return reconcile.Result{}, err } + for _, component := range multiTenantComponents { + if err = imageset.ApplyImageSet(ctx, r.client, variant, component); err != nil { + r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error with images from ImageSet", err, reqLogger) + return reconcile.Result{}, err + } + if err := hdler.CreateOrUpdateOrDelete(ctx, component, r.status); err != nil { + r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) + return reconcile.Result{}, err + } + } + r.status.ReadyToMonitor() r.status.ClearDegraded() return reconcile.Result{}, nil diff --git a/pkg/controller/logstorage/linseed/linseed_controller_test.go b/pkg/controller/logstorage/linseed/linseed_controller_test.go index 355b4c5cd2..b176d95560 100644 --- a/pkg/controller/logstorage/linseed/linseed_controller_test.go +++ b/pkg/controller/logstorage/linseed/linseed_controller_test.go @@ -226,6 +226,7 @@ var _ = Describe("LogStorage Linseed controller", func() { ObjectMeta: metav1.ObjectMeta{Name: "enterprise-" + components.EnterpriseRelease}, Spec: operatorv1.ImageSetSpec{ Images: []operatorv1.Image{ + // TODO: Alina are all needed ? 
{Image: "tigera/elasticsearch", Digest: "sha256:elasticsearchhash"}, {Image: "tigera/kube-controllers", Digest: "sha256:kubecontrollershash"}, {Image: "tigera/kibana", Digest: "sha256:kibanahash"}, diff --git a/pkg/controller/manager/manager_controller.go b/pkg/controller/manager/manager_controller.go index c97ce0247e..33489137de 100644 --- a/pkg/controller/manager/manager_controller.go +++ b/pkg/controller/manager/manager_controller.go @@ -105,17 +105,17 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { }) // Watch for changes to primary resource Manager - err = c.WatchObject(&operatorv1.Manager{}, &handler.EnqueueRequestForObject{}) + err = c.WatchObject(&operatorv1.Manager{}, eventHandler) if err != nil { return fmt.Errorf("manager-controller failed to watch primary resource: %w", err) } - err = c.WatchObject(&operatorv1.TLSTerminatedRoute{}, &handler.EnqueueRequestForObject{}) + err = c.WatchObject(&operatorv1.TLSTerminatedRoute{}, eventHandler) if err != nil { return fmt.Errorf("manager-controller failed to watch TLSTerminatedRoutes: %w", err) } - err = c.WatchObject(&operatorv1.TLSPassThroughRoute{}, &handler.EnqueueRequestForObject{}) + err = c.WatchObject(&operatorv1.TLSPassThroughRoute{}, eventHandler) if err != nil { return fmt.Errorf("manager-controller failed to watch TLSPassThroughRoutes: %w", err) } @@ -146,7 +146,7 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { return fmt.Errorf("manager-controller failed to watch ImageSet: %w", err) } if opts.MultiTenant { - if err = c.WatchObject(&operatorv1.Tenant{}, &handler.EnqueueRequestForObject{}); err != nil { + if err = c.WatchObject(&operatorv1.Tenant{}, eventHandler); err != nil { return fmt.Errorf("manager-controller failed to watch Tenant resource: %w", err) } } @@ -158,8 +158,8 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { } for _, namespace := range namespacesToWatch { for _, secretName := range []string{ - // We need to watch for es-gateway certificate because es-proxy still creates a - // client to talk to elastic via es-gateway + // TODO: ALINA - Do we need to add back esgateway here ? + // TODO: ALINA - Do we need to add Kibana for multi-tenant ? render.ManagerTLSSecretName, relasticsearch.PublicCertSecret, render.VoltronTunnelSecretName, render.ComplianceServerCertSecret, render.PacketCaptureServerCert, render.ManagerInternalTLSSecretName, monitor.PrometheusServerTLSSecretName, certificatemanagement.CASecretName, diff --git a/pkg/render/logstorage/eck/eck.go b/pkg/render/logstorage/eck/eck.go index 330d61954f..1609e5d836 100644 --- a/pkg/render/logstorage/eck/eck.go +++ b/pkg/render/logstorage/eck/eck.go @@ -62,6 +62,7 @@ type Configuration struct { Provider operatorv1.Provider ElasticLicenseType render.ElasticsearchLicenseType ApplyTrial bool + Tenant *operatorv1.Tenant // Whether the cluster supports pod security policies. UsePSP bool @@ -309,6 +310,10 @@ func (e *eck) operatorStatefulSet() *appsv1.StatefulSet { memoryRequest = c.ResourceRequirements.Requests[corev1.ResourceMemory] } } + var namespacesToWatch string + if e.cfg.Tenant.MultiTenant() { + namespacesToWatch = "tigera-elasticsearch,tigera-kibana" + } s := &appsv1.StatefulSet{ TypeMeta: metav1.TypeMeta{Kind: "StatefulSet", APIVersion: "apps/v1"}, ObjectMeta: metav1.ObjectMeta{ @@ -353,7 +358,7 @@ func (e *eck) operatorStatefulSet() *appsv1.StatefulSet { // Verbosity level of logs. 
-2=Error, -1=Warn, 0=Info, 0 and above=Debug Args: []string{ "manager", - "--namespaces=tigera-elasticsearch,tigera-kibana", + fmt.Sprintf("--namespaces=%s", namespacesToWatch), "--log-verbosity=0", "--metrics-port=0", "--container-registry=" + e.cfg.Installation.Registry, diff --git a/pkg/render/logstorage/kibana/kibana.go b/pkg/render/logstorage/kibana/kibana.go index 439dbccc95..4fcf59c965 100644 --- a/pkg/render/logstorage/kibana/kibana.go +++ b/pkg/render/logstorage/kibana/kibana.go @@ -93,16 +93,26 @@ type Configuration struct { TrustedBundle certificatemanagement.TrustedBundleRO UnusedTLSSecret *corev1.Secret Enabled bool + Tenant *operatorv1.Tenant + Namespace string + + // Secret containing client certificate and key for connecting to the Elastic cluster. If configured, + // mTLS is used between Challenger and the external Elastic cluster. + // TODO: Alina Mount volume + ElasticClientSecret *corev1.Secret + ElasticChallengerUser *corev1.Secret + ExternalElasticEndpoint string // Whether the cluster supports pod security policies. UsePSP bool } type kibana struct { - cfg *Configuration - kibanaSecrets []*corev1.Secret - kibanaImage string - csrImage string + cfg *Configuration + kibanaSecrets []*corev1.Secret + kibanaImage string + challengerImage string + csrImage string } func (k *kibana) ResolveImages(is *operatorv1.ImageSet) error { @@ -121,6 +131,13 @@ func (k *kibana) ResolveImages(is *operatorv1.ImageSet) error { errMsgs = append(errMsgs, err.Error()) } + if k.cfg.Tenant.MultiTenant() { + k.challengerImage, err = components.GetReference(components.ComponentESGateway, reg, path, prefix, is) + if err != nil { + errMsgs = append(errMsgs, err.Error()) + } + } + if k.cfg.Installation.CertificateManagement != nil { k.csrImage, err = certificatemanagement.ResolveCSRInitImage(k.cfg.Installation, is) if err != nil { @@ -168,20 +185,28 @@ func (k *kibana) Objects() ([]client.Object, []client.Object) { // - securityContext.capabilities.drop=["ALL"] // - securityContext.runAsNonRoot=true // - securityContext.seccompProfile.type to "RuntimeDefault" or "Localhost" - toCreate = append(toCreate, render.CreateNamespace(Namespace, k.cfg.Installation.KubernetesProvider, render.PSSBaseline)) + // We only create the certain objects in a zero tenant or single tenant installation + // For example, tigera-kibana namespace, pull secrets and default deny + // For multi-tenancy, these are already created by other renderers + if !k.cfg.Tenant.MultiTenant() { + toCreate = append(toCreate, render.CreateNamespace(Namespace, k.cfg.Installation.KubernetesProvider, render.PSSBaseline)) + if len(k.cfg.PullSecrets) > 0 { + toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(Namespace, k.cfg.PullSecrets...)...)...) + } + toCreate = append(toCreate, networkpolicy.AllowTigeraDefaultDeny(Namespace)) + } toCreate = append(toCreate, k.allowTigeraPolicy()) - toCreate = append(toCreate, networkpolicy.AllowTigeraDefaultDeny(Namespace)) toCreate = append(toCreate, k.serviceAccount()) - - if len(k.cfg.PullSecrets) > 0 { - toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(Namespace, k.cfg.PullSecrets...)...)...) - } - if len(k.kibanaSecrets) > 0 { toCreate = append(toCreate, secret.ToRuntimeObjects(k.kibanaSecrets...)...) } - toCreate = append(toCreate, k.kibanaCR()) + // TODO: ALINA: I think we do the same in Linseed + if k.cfg.ElasticClientSecret != nil { + // If using External ES, we need to copy the client certificates into the tenant namespace to be mounted. 
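The copy below relies on the render/common/secret helpers; their assumed semantics, inferred from this usage rather than from their definitions: CopyToNamespace returns a copy of each secret with metadata.namespace rewritten and server-assigned fields cleared, so the same TLS material can be recreated in the tenant namespace, and ToRuntimeObjects adapts the copies for the component handler:

	copies := secret.CopyToNamespace(k.cfg.Namespace, k.cfg.ElasticClientSecret)
	objs := secret.ToRuntimeObjects(copies...)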
+ toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(k.cfg.Namespace, k.cfg.ElasticClientSecret)...)...) + } + } else { toDelete = append(toDelete, k.kibanaCR()) } @@ -194,7 +219,7 @@ func (k *kibana) Objects() ([]client.Object, []client.Object) { if k.cfg.KibanaKeyPair != nil && k.cfg.KibanaKeyPair.UseCertificateManagement() { // We need to render a secret. It won't ever be used by Kibana for TLS, but is needed to pass ECK's checks. // If the secret changes / gets reconciled, it will not trigger a re-render of Kibana. - unusedSecret := k.cfg.KibanaKeyPair.Secret(Namespace) + unusedSecret := k.cfg.KibanaKeyPair.Secret(k.cfg.Namespace) unusedSecret.Data = k.cfg.UnusedTLSSecret.Data toCreate = append(toCreate, unusedSecret) } @@ -213,7 +238,7 @@ func (k *kibana) serviceAccount() *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: ObjectName, - Namespace: Namespace, + Namespace: k.cfg.Namespace, }, } } @@ -230,7 +255,6 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { } config := map[string]interface{}{ - "elasticsearch.ssl.certificateAuthorities": []string{"/usr/share/kibana/config/elasticsearch-certs/tls.crt"}, "server": server, "xpack.security.session.lifespan": "8h", "xpack.security.session.idleTimeout": "30m", @@ -243,6 +267,13 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { "telemetry.optIn": false, } + if k.cfg.Tenant.MultiTenant() { + config["elasticsearch.host"] = "http://localhost:8080" + config["elasticsearch.ssl.verificationMode"] = "none" + } else { + config["elasticsearch.ssl.certificateAuthorities"] = []string{"/usr/share/kibana/config/elasticsearch-certs/tls.crt"} + } + var initContainers []corev1.Container var volumes []corev1.Volume var automountToken bool @@ -279,7 +310,8 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { VolumeSource: corev1.VolumeSource{ EmptyDir: &corev1.EmptyDirVolumeSource{}, }, - }) + }, + ) } count := int32(1) @@ -287,11 +319,68 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { count = *k.cfg.Installation.ControlPlaneReplicas } + containers := []corev1.Container{ + { + Name: "kibana", + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: fmt.Sprintf("/%s/login", BasePath), + Port: intstr.IntOrString{ + IntVal: Port, + }, + Scheme: corev1.URISchemeHTTPS, + }, + }, + }, + SecurityContext: securitycontext.NewNonRootContext(), + VolumeMounts: volumeMounts, + }, + } + + if k.cfg.Tenant.MultiTenant() { + volumes = append(volumes, k.cfg.TrustedBundle.Volume()) + containers = append(containers, corev1.Container{ + Name: "challenger", + Env: []corev1.EnvVar{ + { + Name: "ES_GATEWAY_LOG_LEVEL", + Value: "INFO", + }, + { + Name: "ES_GATEWAY_KIBANA_CATCH_ALL_ROUTE", + Value: "/", + }, + { + Name: "ES_GATEWAY_ELASTIC_ENDPOINT", + Value: k.cfg.ExternalElasticEndpoint, + }, + {Name: "ES_GATEWAY_ELASTIC_USERNAME", Value: "elastic"}, + {Name: "ES_GATEWAY_ELASTIC_PASSWORD", ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + // TODO: Alina change user + Name: render.ElasticsearchAdminUserSecret, + }, + Key: "elastic", + }, + }}, + {Name: "ES_GATEWAY_ELASTIC_CA_BUNDLE_PATH", Value: k.cfg.TrustedBundle.MountPath()}, + }, + Command: []string{ + "/usr/bin/es-gateway", "-run-as-challenger", + }, + Image: k.challengerImage, + SecurityContext: securitycontext.NewNonRootContext(), + VolumeMounts: k.cfg.TrustedBundle.VolumeMounts(k.SupportedOSType()), + }) + } + kibana := &kbv1.Kibana{ 
TypeMeta: metav1.TypeMeta{Kind: "Kibana", APIVersion: "kibana.k8s.elastic.co/v1"}, ObjectMeta: metav1.ObjectMeta{ Name: CRName, - Namespace: Namespace, + Namespace: k.cfg.Namespace, Labels: map[string]string{ "k8s-app": CRName, }, @@ -310,13 +399,9 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { }, }, }, - ElasticsearchRef: cmnv1.ObjectSelector{ - Name: render.ElasticsearchName, - Namespace: render.ElasticsearchNamespace, - }, PodTemplate: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Namespace: Namespace, + Namespace: k.cfg.Namespace, Annotations: map[string]string{ TLSAnnotationHash: rmeta.SecretsAnnotationHash(k.kibanaSecrets...), }, @@ -332,28 +417,20 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { Tolerations: k.cfg.Installation.ControlPlaneTolerations, InitContainers: initContainers, AutomountServiceAccountToken: &automountToken, - Containers: []corev1.Container{{ - Name: "kibana", - ReadinessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: fmt.Sprintf("/%s/login", BasePath), - Port: intstr.IntOrString{ - IntVal: Port, - }, - Scheme: corev1.URISchemeHTTPS, - }, - }, - }, - SecurityContext: securitycontext.NewNonRootContext(), - VolumeMounts: volumeMounts, - }}, - Volumes: volumes, + Containers: containers, + Volumes: volumes, }, }, }, } + if !k.cfg.Tenant.MultiTenant() { + kibana.Spec.ElasticsearchRef = cmnv1.ObjectSelector{ + Name: render.ElasticsearchName, + Namespace: render.ElasticsearchNamespace, + } + } + if k.cfg.Installation.ControlPlaneReplicas != nil && *k.cfg.Installation.ControlPlaneReplicas > 1 { kibana.Spec.PodTemplate.Spec.Affinity = podaffinity.NewPodAntiAffinity(CRName, Namespace) } @@ -398,7 +475,7 @@ func (k *kibana) clusterRoleBinding() *rbacv1.ClusterRoleBinding { { Kind: "ServiceAccount", Name: ObjectName, - Namespace: Namespace, + Namespace: k.cfg.Namespace, }, }, } @@ -410,6 +487,7 @@ func (k *kibana) kibanaPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { // Allow access to Kibana func (k *kibana) allowTigeraPolicy() *v3.NetworkPolicy { + networkPolicyHelper := networkpolicy.Helper(k.cfg.Tenant.MultiTenant(), k.cfg.Namespace) egressRules := []v3.Rule{ { Action: v3.Allow, @@ -425,6 +503,7 @@ func (k *kibana) allowTigeraPolicy() *v3.NetworkPolicy { Protocol: &networkpolicy.TCPProtocol, Destination: networkpolicy.KubeAPIServerServiceSelectorEntityRule, }, + // TODO: ALINA - DO WE NEED TO REMOVE EGRESS GATEWAY FOR MULTI-TENANT { Action: v3.Allow, Protocol: &networkpolicy.TCPProtocol, @@ -439,7 +518,7 @@ func (k *kibana) allowTigeraPolicy() *v3.NetworkPolicy { TypeMeta: metav1.TypeMeta{Kind: "NetworkPolicy", APIVersion: "projectcalico.org/v3"}, ObjectMeta: metav1.ObjectMeta{ Name: PolicyName, - Namespace: Namespace, + Namespace: k.cfg.Namespace, }, Spec: v3.NetworkPolicySpec{ Order: &networkpolicy.HighPrecedenceOrder, @@ -465,6 +544,7 @@ func (k *kibana) allowTigeraPolicy() *v3.NetworkPolicy { }, Destination: kibanaPortIngressDestination, }, + // TODO: ALINA - DO WE NEED TO REMOVE EGRESS GATEWAY FOR MULTI-TENANT { Action: v3.Allow, Protocol: &networkpolicy.TCPProtocol, @@ -474,9 +554,10 @@ func (k *kibana) allowTigeraPolicy() *v3.NetworkPolicy { { Action: v3.Allow, Protocol: &networkpolicy.TCPProtocol, - Source: networkpolicy.DefaultHelper().DashboardInstallerSourceEntityRule(), + Source: networkPolicyHelper.DashboardInstallerSourceEntityRule(), Destination: kibanaPortIngressDestination, }, + // TODO: ALINA - DO WE NEED TO ADD MANAGER? 
{ Action: v3.Allow, Protocol: &networkpolicy.TCPProtocol, diff --git a/pkg/render/logstorage/kibana/kibana_test.go b/pkg/render/logstorage/kibana/kibana_test.go index ebf20789bb..7aef738c8d 100644 --- a/pkg/render/logstorage/kibana/kibana_test.go +++ b/pkg/render/logstorage/kibana/kibana_test.go @@ -104,6 +104,7 @@ var _ = Describe("Kibana rendering tests", func() { TrustedBundle: bundle, UsePSP: true, Enabled: true, + Namespace: kibana.Namespace, } }) diff --git a/pkg/render/logstorage/linseed/linseed.go b/pkg/render/logstorage/linseed/linseed.go index c44c9329a5..62710d07a2 100644 --- a/pkg/render/logstorage/linseed/linseed.go +++ b/pkg/render/logstorage/linseed/linseed.go @@ -168,7 +168,7 @@ func (l *linseed) Objects() (toCreate, toDelete []client.Object) { toCreate = append(toCreate, l.linseedPodSecurityPolicy()) } if l.cfg.ElasticClientSecret != nil { - // If using External ES, we need to copy the client certificates into Linseed's naespace to be mounted. + // If using External ES, we need to copy the client certificates into Linseed's namespace to be mounted. toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(l.cfg.Namespace, l.cfg.ElasticClientSecret)...)...) } return toCreate, toDelete @@ -691,13 +691,3 @@ func (l *linseed) linseedAllowTigeraPolicy() *v3.NetworkPolicy { }, } } - -// LinseedNamespace determine the namespace in which Linseed is running. -// For management and standalone clusters, this is always the tigera-elasticsearch -// namespace. For multi-tenant management clusters, this is the tenant namespace -func LinseedNamespace(tenant *operatorv1.Tenant) string { - if tenant.MultiTenant() { - return tenant.Namespace - } - return "tigera-elasticsearch" -} diff --git a/pkg/render/manager.go b/pkg/render/manager.go index 6ac4489114..770e16db12 100644 --- a/pkg/render/manager.go +++ b/pkg/render/manager.go @@ -429,6 +429,7 @@ func (c *managerComponent) managerProxyProbe() *corev1.Probe { func KibanaEnabled(tenant *operatorv1.Tenant, installation *operatorv1.InstallationSpec) bool { enableKibana := !operatorv1.IsFIPSModeEnabled(installation.FIPSMode) if tenant.MultiTenant() { + // TODO: Alina Extract from CR enableKibana = false } return enableKibana @@ -613,6 +614,7 @@ func (c *managerComponent) managerEsProxyContainer() corev1.Container { env := []corev1.EnvVar{ {Name: "ELASTIC_LICENSE_TYPE", Value: string(c.cfg.ESLicenseType)}, + // TODO: ALINA - For multi-tenancy this needs to be in the tenant namespace {Name: "ELASTIC_KIBANA_ENDPOINT", Value: rkibana.HTTPSEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain)}, {Name: "FIPS_MODE_ENABLED", Value: operatorv1.IsFIPSModeEnabledString(c.cfg.Installation.FIPSMode)}, {Name: "LINSEED_CLIENT_CERT", Value: certPath},
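The patch deletes LinseedNamespace in favour of explicit namespace plumbing, while the ELASTIC_KIBANA_ENDPOINT TODO above remains open. A hypothetical helper mirroring the removed function, shown only to make the intended per-tenant resolution concrete (not part of this patch):

	func KibanaNamespace(tenant *operatorv1.Tenant) string {
		if tenant.MultiTenant() {
			// A per-tenant Kibana lives in the tenant's own namespace.
			return tenant.Namespace
		}
		// Zero-tenant and single-tenant clusters use the fixed namespace.
		return "tigera-kibana"
	}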