Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

WIP: Add support for OpenShift console on native Kubernetes #2996

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 20 additions & 14 deletions cmd/hyperconverged-cluster-operator/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ func main() {
needLeaderElection := !ci.IsRunningLocally()

// Create a new Cmd to provide shared dependencies and start components
mgr, err := manager.New(cfg, getManagerOptions(operatorNamespace, needLeaderElection, ci.IsMonitoringAvailable(), ci.IsOpenshift(), scheme))
mgr, err := manager.New(cfg, getManagerOptions(operatorNamespace, needLeaderElection, ci.IsMonitoringAvailable(), ci.IsNativeOpenshift(), ci.HasOpenshiftConsole(), scheme))
cmdHelper.ExitOnError(err, "can't initiate manager")

// register pprof instrumentation if HCO_PPROF_ADDR is set
Expand Down Expand Up @@ -192,7 +192,7 @@ func main() {

// Restricts the cache's ListWatch to specific fields/labels per GVK at the specified object to control the memory impact
// this is used to completely overwrite the NewCache function so all the interesting objects should be explicitly listed here
func getCacheOption(operatorNamespace string, isMonitoringAvailable, isOpenshift bool) cache.Options {
func getCacheOption(operatorNamespace string, isMonitoringAvailable, isNativeOpenshift bool, hasOpenshiftConsole bool) cache.Options {
namespaceSelector := fields.Set{"metadata.namespace": operatorNamespace}.AsSelector()
labelSelector := labels.Set{hcoutil.AppLabel: hcoutil.HyperConvergedName}.AsSelector()
labelSelectorForNamespace := labels.Set{hcoutil.KubernetesMetadataName: operatorNamespace}.AsSelector()
Expand Down Expand Up @@ -247,14 +247,7 @@ func getCacheOption(operatorNamespace string, isMonitoringAvailable, isOpenshift
},
}

cacheOptionsByOjectForOpenshift := map[client.Object]cache.ByObject{
&openshiftroutev1.Route{}: {
Field: namespaceSelector,
},
&imagev1.ImageStream{}: {
Label: labelSelector,
},
&openshiftconfigv1.APIServer{}: {},
cacheOptionsByOjectForOpenshiftConsole := map[client.Object]cache.ByObject{
&consolev1.ConsoleCLIDownload{}: {
Label: labelSelector,
},
Expand All @@ -266,18 +259,31 @@ func getCacheOption(operatorNamespace string, isMonitoringAvailable, isOpenshift
},
}

cacheOptionsByOjectForNativeOpenshift := map[client.Object]cache.ByObject{
&openshiftroutev1.Route{}: {
Field: namespaceSelector,
},
&imagev1.ImageStream{}: {
Label: labelSelector,
},
&openshiftconfigv1.APIServer{}: {},
}

if isMonitoringAvailable {
maps.Copy(cacheOptions.ByObject, cacheOptionsByOjectForMonitoring)
}
if isOpenshift {
maps.Copy(cacheOptions.ByObject, cacheOptionsByOjectForOpenshift)
if isNativeOpenshift {
maps.Copy(cacheOptions.ByObject, cacheOptionsByOjectForNativeOpenshift)
}
if hasOpenshiftConsole {
maps.Copy(cacheOptions.ByObject, cacheOptionsByOjectForOpenshiftConsole)
}

return cacheOptions

}

func getManagerOptions(operatorNamespace string, needLeaderElection, isMonitoringAvailable, isOpenshift bool, scheme *apiruntime.Scheme) manager.Options {
func getManagerOptions(operatorNamespace string, needLeaderElection, isMonitoringAvailable, isNativeOpenshift bool, hasOpenshiftConsole bool, scheme *apiruntime.Scheme) manager.Options {
return manager.Options{
Metrics: server.Options{
BindAddress: fmt.Sprintf("%s:%d", hcoutil.MetricsHost, hcoutil.MetricsPort),
Expand All @@ -292,7 +298,7 @@ func getManagerOptions(operatorNamespace string, needLeaderElection, isMonitorin
// "configmapsleases". Therefore, having only "leases" should be safe now.
LeaderElectionResourceLock: resourcelock.LeasesResourceLock,
LeaderElectionID: "hyperconverged-cluster-operator-lock",
Cache: getCacheOption(operatorNamespace, isMonitoringAvailable, isOpenshift),
Cache: getCacheOption(operatorNamespace, isMonitoringAvailable, isNativeOpenshift, hasOpenshiftConsole),
Scheme: scheme,
}
}
Expand Down
2 changes: 1 addition & 1 deletion cmd/hyperconverged-cluster-webhook/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ func main() {
err = webhookscontrollers.RegisterReconciler(mgr, ci)
cmdHelper.ExitOnError(err, "Cannot register APIServer reconciler")

if err = webhooks.SetupWebhookWithManager(ctx, mgr, ci.IsOpenshift(), hcoTLSSecurityProfile); err != nil {
if err = webhooks.SetupWebhookWithManager(ctx, mgr, ci.HasOpenshiftConsole(), hcoTLSSecurityProfile); err != nil {
logger.Error(err, "unable to create webhook", "webhook", "HyperConverged")
eventEmitter.EmitEvent(nil, corev1.EventTypeWarning, "InitError", "Unable to create webhook")
os.Exit(1)
Expand Down
2 changes: 1 addition & 1 deletion controllers/alerts/rbac.go
Original file line number Diff line number Diff line change
Expand Up @@ -191,7 +191,7 @@ func newRoleBinding(owner metav1.OwnerReference, namespace string, ci hcoutil.Cl
}

func getMonitoringNamespace(ci hcoutil.ClusterInfo) string {
if ci.IsOpenshift() {
if ci.IsNativeOpenshift() {
return openshiftMonitoringNamespace
}

Expand Down
15 changes: 12 additions & 3 deletions controllers/commontestutils/testUtils.go
Original file line number Diff line number Diff line change
Expand Up @@ -282,7 +282,10 @@ type ClusterInfoMock struct{}
// Init is a no-op for the mock; it ignores all arguments and never fails.
func (ClusterInfoMock) Init(_ context.Context, _ client.Client, _ logr.Logger) error {
	return nil
}
func (ClusterInfoMock) IsOpenshift() bool {
// IsNativeOpenshift always reports true: this mock simulates running on an
// OpenShift cluster.
func (ClusterInfoMock) IsNativeOpenshift() bool {
	return true
}
// HasOpenshiftConsole always reports true: this mock simulates a cluster where
// the OpenShift console is available.
func (ClusterInfoMock) HasOpenshiftConsole() bool {
	return true
}
func (ClusterInfoMock) IsRunningLocally() bool {
Expand Down Expand Up @@ -339,7 +342,10 @@ type ClusterInfoSNOMock struct{}
// Init is a no-op for the single-node (SNO) mock; it ignores all arguments and
// never fails.
func (ClusterInfoSNOMock) Init(_ context.Context, _ client.Client, _ logr.Logger) error {
	return nil
}
func (ClusterInfoSNOMock) IsOpenshift() bool {
// IsNativeOpenshift always reports true: the SNO mock simulates running on an
// OpenShift cluster.
func (ClusterInfoSNOMock) IsNativeOpenshift() bool {
	return true
}
// HasOpenshiftConsole always reports true: the SNO mock simulates a cluster
// where the OpenShift console is available.
func (ClusterInfoSNOMock) HasOpenshiftConsole() bool {
	return true
}
func (ClusterInfoSNOMock) IsRunningLocally() bool {
Expand Down Expand Up @@ -396,7 +402,10 @@ type ClusterInfoSRCPHAIMock struct{}
// Init is a no-op for this mock; it ignores all arguments and never fails.
func (ClusterInfoSRCPHAIMock) Init(_ context.Context, _ client.Client, _ logr.Logger) error {
	return nil
}
func (ClusterInfoSRCPHAIMock) IsOpenshift() bool {
// IsNativeOpenshift always reports true: this mock simulates running on an
// OpenShift cluster.
func (ClusterInfoSRCPHAIMock) IsNativeOpenshift() bool {
	return true
}
// HasOpenshiftConsole always reports true: this mock simulates a cluster where
// the OpenShift console is available.
func (ClusterInfoSRCPHAIMock) HasOpenshiftConsole() bool {
	return true
}
func (ClusterInfoSRCPHAIMock) IsRunningLocally() bool {
Expand Down
13 changes: 9 additions & 4 deletions controllers/hyperconverged/hyperconverged_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -195,19 +195,24 @@ func add(mgr manager.Manager, r reconcile.Reconciler, ci hcoutil.ClusterInfo) er
&monitoringv1.PrometheusRule{},
}...)
}
if ci.IsOpenshift() {
if ci.HasOpenshiftConsole() {
// TODO: Ensure we need all of these
secondaryResources = append(secondaryResources, []client.Object{
&sspv1beta2.SSP{},
&corev1.Service{},
&routev1.Route{},
&consolev1.ConsoleCLIDownload{},
&consolev1.ConsoleQuickStart{},
&consolev1.ConsolePlugin{},
&imagev1.ImageStream{},
&corev1.Namespace{},
&appsv1.Deployment{},
}...)
}
if ci.IsNativeOpenshift() {
secondaryResources = append(secondaryResources, []client.Object{
&routev1.Route{},
&imagev1.ImageStream{},
}...)
}

// Watch secondary resources
for _, resource := range secondaryResources {
Expand All @@ -234,7 +239,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler, ci hcoutil.ClusterInfo) er
return err
}

if ci.IsOpenshift() {
if ci.IsNativeOpenshift() {
// Watch openshiftconfigv1.APIServer separately
msg := "Reconciling for openshiftconfigv1.APIServer"

Expand Down
18 changes: 12 additions & 6 deletions controllers/operands/operandHandler.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,16 +59,19 @@ func NewOperandHandler(client client.Client, scheme *runtime.Scheme, ci hcoutil.
newAAQHandler(client, scheme),
}

if ci.IsOpenshift() {
if ci.HasOpenshiftConsole() {
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

not sure about this whole logic. @tiraboschi - WDYT?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think that the SSP operator (line 64) will work on vanilla k8s just because the OpenShift Console has been deployed there.

operands = append(operands, []Operand{
(*genericOperand)(newSspHandler(client, scheme)),
(*genericOperand)(newCliDownloadHandler(client, scheme)),
(*genericOperand)(newCliDownloadsRouteHandler(client, scheme)),
(*genericOperand)(newServiceHandler(client, scheme, NewCliDownloadsService)),
}...)
}

if ci.IsOpenshift() && ci.IsConsolePluginImageProvided() {
if ci.IsNativeOpenshift() {
operands = append(operands, (*genericOperand)(newCliDownloadsRouteHandler(client, scheme)))
}

if ci.HasOpenshiftConsole() && ci.IsConsolePluginImageProvided() {
operands = append(operands, newConsoleHandler(client))
operands = append(operands, (*genericOperand)(newServiceHandler(client, scheme, NewKvUIPluginSvc)))
operands = append(operands, (*genericOperand)(newServiceHandler(client, scheme, NewKvUIProxySvc)))
Expand All @@ -90,16 +93,19 @@ func NewOperandHandler(client client.Client, scheme *runtime.Scheme, ci hcoutil.
// Initial operations that need to read/write from the cluster can only be done when the client is already working.
func (h *OperandHandler) FirstUseInitiation(scheme *runtime.Scheme, ci hcoutil.ClusterInfo, hc *hcov1beta1.HyperConverged) {
h.objects = make([]client.Object, 0)
if ci.IsOpenshift() {
if ci.HasOpenshiftConsole() {
h.addOperands(scheme, hc, getQuickStartHandlers)
h.addOperands(scheme, hc, getDashboardHandlers)
h.addOperands(scheme, hc, getImageStreamHandlers)
h.addOperands(scheme, hc, newVirtioWinCmHandler)
h.addOperands(scheme, hc, newVirtioWinCmReaderRoleHandler)
h.addOperands(scheme, hc, newVirtioWinCmReaderRoleBindingHandler)
}

if ci.IsNativeOpenshift() {
h.addOperands(scheme, hc, getImageStreamHandlers)
}

if ci.IsOpenshift() && ci.IsConsolePluginImageProvided() {
if ci.HasOpenshiftConsole() && ci.IsConsolePluginImageProvided() {
h.addOperands(scheme, hc, newKvUIPluginDeploymentHandler)
h.addOperands(scheme, hc, newKvUIProxyDeploymentHandler)
h.addOperands(scheme, hc, newKvUINginxCMHandler)
Expand Down
2 changes: 1 addition & 1 deletion controllers/webhooks/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ func (r *ReconcileAPIServer) Reconcile(ctx context.Context, _ reconcile.Request)

// RegisterReconciler creates a new HyperConverged Reconciler and registers it into manager.
func RegisterReconciler(mgr manager.Manager, ci hcoutil.ClusterInfo) error {
if ci.IsOpenshift() {
if ci.IsNativeOpenshift() {
return add(mgr, newReconciler(mgr, ci))
}
return nil
Expand Down
31 changes: 20 additions & 11 deletions pkg/util/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,8 @@ import (

type ClusterInfo interface {
Init(ctx context.Context, cl client.Client, logger logr.Logger) error
IsOpenshift() bool
IsNativeOpenshift() bool
HasOpenshiftConsole() bool
IsRunningLocally() bool
GetDomain() string
GetBaseDomain() string
Expand All @@ -43,7 +44,8 @@ type ClusterInfo interface {
}

type ClusterInfoImp struct {
runningInOpenshift bool
runningInNativeOpenshift bool
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I wouldn't change the name of this field. We want to know if HCO is running in an openshift cluster or not, and this the old name is accurate. Also, I don't know what "native openshift" means.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, I'll revert the name change 👍

runningWithOpenshiftConsole bool
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

HCO and the openshift console are not actually related. so it's not like HCO is running with the console. What do you think about something like openshiftConsoleInstalled?

managedByOLM bool
runningLocally bool
controlPlaneHighlyAvailable bool
Expand Down Expand Up @@ -78,15 +80,15 @@ func (c *ClusterInfoImp) Init(ctx context.Context, cl client.Client, logger logr
// We assume that this Operator is managed by OLM when this variable is present.
_, c.managedByOLM = os.LookupEnv(OperatorConditionNameEnvVar)

if c.runningInOpenshift {
if c.runningInNativeOpenshift {
err = c.initOpenshift(ctx, cl)
} else {
err = c.initKubernetes(cl)
}
if err != nil {
return err
}
if c.runningInOpenshift && c.singlestackipv6 {
if c.runningInNativeOpenshift && c.singlestackipv6 {
metrics.SetHCOMetricSingleStackIPv6True()
}

Expand Down Expand Up @@ -179,8 +181,12 @@ func (c *ClusterInfoImp) IsManagedByOLM() bool {
return c.managedByOLM
}

func (c *ClusterInfoImp) IsOpenshift() bool {
return c.runningInOpenshift
// IsNativeOpenshift reports whether the cluster was detected as OpenShift.
// It returns the cached runningInNativeOpenshift flag, which is set during
// cluster detection (see queryCluster) based on whether a ClusterVersion
// resource could be fetched.
func (c *ClusterInfoImp) IsNativeOpenshift() bool {
	return c.runningInNativeOpenshift
}

func (c *ClusterInfoImp) HasOpenshiftConsole() bool {
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

same: How do you feel about something like IsOpenshiftConsoleInstalled()?

return c.runningInNativeOpenshift || c.runningWithOpenshiftConsole
}

func (c *ClusterInfoImp) IsConsolePluginImageProvided() bool {
Expand Down Expand Up @@ -268,8 +274,9 @@ func isCRDExists(ctx context.Context, cl client.Client, crdName string) bool {

func init() {
clusterInfo = &ClusterInfoImp{
runningLocally: IsRunModeLocal(),
runningInOpenshift: false,
runningLocally: IsRunModeLocal(),
runningInNativeOpenshift: false,
runningWithOpenshiftConsole: false,
}
}

Expand All @@ -284,14 +291,16 @@ func (c *ClusterInfoImp) queryCluster(ctx context.Context, cl client.Client) err
var gdferr *discovery.ErrGroupDiscoveryFailed
if meta.IsNoMatchError(err) || apierrors.IsNotFound(err) || errors.As(err, &gdferr) {
// Not on OpenShift
c.runningInOpenshift = false
c.runningInNativeOpenshift = false
c.logger.Info("Cluster type = kubernetes")
c.runningWithOpenshiftConsole = isCRDExists(ctx, cl, OpenShiftConsolePluginCRDName)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is that enough? Shouldn't we also check that the CR also exists?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The OpenShiftConsolePlugin custom resource allows us to register a console plugin. I don't think we need to check if any CR exists as long as the CRD exists.

AaronDewes marked this conversation as resolved.
Show resolved Hide resolved
} else {
c.logger.Error(err, "Failed to get ClusterVersion")
return err
}
} else {
c.runningInOpenshift = true
c.runningInNativeOpenshift = true
c.runningWithOpenshiftConsole = true
c.logger.Info("Cluster type = openshift", "version", clusterVersion.Status.Desired.Version)
c.domain, err = getClusterDomain(ctx, cl)
if err != nil {
Expand All @@ -318,7 +327,7 @@ func (c *ClusterInfoImp) GetTLSSecurityProfile(hcoTLSSecurityProfile *openshiftc
}

func (c *ClusterInfoImp) RefreshAPIServerCR(ctx context.Context, cl client.Client) error {
if c.IsOpenshift() {
if c.IsNativeOpenshift() {
instance := &openshiftconfigv1.APIServer{}

key := client.ObjectKey{Namespace: UndefinedNamespace, Name: APIServerCRName}
Expand Down
1 change: 1 addition & 0 deletions pkg/util/consts.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ const (
HcoMutatingWebhookNS = "mutate-ns-hco.kubevirt.io"
PrometheusRuleCRDName = "prometheusrules.monitoring.coreos.com"
ServiceMonitorCRDName = "servicemonitors.monitoring.coreos.com"
OpenShiftConsolePluginCRDName = "consoleplugins.console.openshift.io"
HcoMutatingWebhookHyperConverged = "mutate-hyperconverged-hco.kubevirt.io"
AppLabel = "app"
UndefinedNamespace = ""
Expand Down
4 changes: 2 additions & 2 deletions pkg/webhooks/setup.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ var (
logger = logf.Log.WithName("webhook-setup")
)

func SetupWebhookWithManager(ctx context.Context, mgr ctrl.Manager, isOpenshift bool, hcoTLSSecurityProfile *openshiftconfigv1.TLSSecurityProfile) error {
func SetupWebhookWithManager(ctx context.Context, mgr ctrl.Manager, isOpenshiftConsole bool, hcoTLSSecurityProfile *openshiftconfigv1.TLSSecurityProfile) error {
operatorNsEnv, nserr := hcoutil.GetOperatorNamespaceFromEnv()
if nserr != nil {
logger.Error(nserr, "failed to get operator namespace from the environment")
Expand All @@ -37,7 +37,7 @@ func SetupWebhookWithManager(ctx context.Context, mgr ctrl.Manager, isOpenshift

decoder := admission.NewDecoder(mgr.GetScheme())

whHandler := validator.NewWebhookHandler(logger, mgr.GetClient(), decoder, operatorNsEnv, isOpenshift, hcoTLSSecurityProfile)
whHandler := validator.NewWebhookHandler(logger, mgr.GetClient(), decoder, operatorNsEnv, isOpenshiftConsole, hcoTLSSecurityProfile)
nsMutator := mutator.NewNsMutator(mgr.GetClient(), decoder, operatorNsEnv)
hyperConvergedMutator := mutator.NewHyperConvergedMutator(mgr.GetClient(), decoder)

Expand Down
16 changes: 8 additions & 8 deletions pkg/webhooks/validator/validator.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,22 +35,22 @@ const (
)

type WebhookHandler struct {
logger logr.Logger
cli client.Client
namespace string
isOpenshift bool
decoder admission.Decoder
logger logr.Logger
cli client.Client
namespace string
isOpenshiftConsole bool
decoder admission.Decoder
}

var hcoTLSConfigCache *openshiftconfigv1.TLSSecurityProfile

func NewWebhookHandler(logger logr.Logger, cli client.Client, decoder admission.Decoder, namespace string, isOpenshift bool, hcoTLSSecurityProfile *openshiftconfigv1.TLSSecurityProfile) *WebhookHandler {
func NewWebhookHandler(logger logr.Logger, cli client.Client, decoder admission.Decoder, namespace string, isOpenshiftConsole bool, hcoTLSSecurityProfile *openshiftconfigv1.TLSSecurityProfile) *WebhookHandler {
hcoTLSConfigCache = hcoTLSSecurityProfile
return &WebhookHandler{
logger: logger,
cli: cli,
namespace: namespace,
isOpenshift: isOpenshift,
isOpenshiftConsole: isOpenshiftConsole,
decoder: decoder,
}
}
Expand Down Expand Up @@ -216,7 +216,7 @@ func (wh *WebhookHandler) ValidateUpdate(ctx context.Context, dryrun bool, reque
cna,
}

if wh.isOpenshift {
if wh.isOpenshiftConsole {
ssp, _, err := operands.NewSSP(requested)
if err != nil {
return err
Expand Down