diff --git a/controllers/openstackcluster_controller.go b/controllers/openstackcluster_controller.go
index eef0a3633b..ac23a9aa35 100644
--- a/controllers/openstackcluster_controller.go
+++ b/controllers/openstackcluster_controller.go
@@ -21,9 +21,6 @@ import (
 	"fmt"
 	"reflect"
 
-	"github.com/go-logr/logr"
-	"github.com/gophercloud/gophercloud"
-	"github.com/gophercloud/utils/openstack/clientconfig"
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -51,6 +48,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/loadbalancer"
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking"
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/provider"
+	"sigs.k8s.io/cluster-api-provider-openstack/pkg/scope"
 )
 
 // OpenStackClusterReconciler reconciles a OpenStackCluster object.
@@ -109,28 +107,34 @@ func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req
 		}
 	}()
 
+	osProviderClient, clientOpts, err := provider.NewClientFromCluster(ctx, r.Client, openStackCluster)
+	if err != nil {
+		return reconcile.Result{}, err
+	}
+
+	scope := &scope.Scope{
+		ProviderClient:     osProviderClient,
+		ProviderClientOpts: clientOpts,
+		Logger:             log,
+	}
+
 	// Handle deleted clusters
 	if !openStackCluster.DeletionTimestamp.IsZero() {
-		return reconcileDelete(ctx, log, r.Client, patchHelper, cluster, openStackCluster)
+		return reconcileDelete(ctx, scope, patchHelper, cluster, openStackCluster)
 	}
 
 	// Handle non-deleted clusters
-	return reconcileNormal(ctx, log, r.Client, patchHelper, cluster, openStackCluster)
+	return reconcileNormal(ctx, scope, patchHelper, cluster, openStackCluster)
 }
 
-func reconcileDelete(ctx context.Context, log logr.Logger, client client.Client, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
-	log.Info("Reconciling Cluster delete")
-
-	osProviderClient, clientOpts, err := provider.NewClientFromCluster(ctx, client, openStackCluster)
-	if err != nil {
-		return reconcile.Result{}, err
-	}
+func reconcileDelete(ctx context.Context, scope *scope.Scope, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
+	scope.Logger.Info("Reconciling Cluster delete")
 
-	if err = deleteBastion(log, osProviderClient, clientOpts, cluster, openStackCluster); err != nil {
+	if err := deleteBastion(scope, cluster, openStackCluster); err != nil {
 		return reconcile.Result{}, err
 	}
 
-	networkingService, err := networking.NewService(osProviderClient, clientOpts, log)
+	networkingService, err := networking.NewService(scope)
 	if err != nil {
 		return reconcile.Result{}, err
 	}
@@ -138,7 +142,7 @@ func reconcileDelete(ctx context.Context, log logr.Logger, client client.Client,
 	clusterName := fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name)
 
 	if openStackCluster.Spec.ManagedAPIServerLoadBalancer {
-		loadBalancerService, err := loadbalancer.NewService(osProviderClient, clientOpts, log)
+		loadBalancerService, err := loadbalancer.NewService(scope)
 		if err != nil {
 			return reconcile.Result{}, err
 		}
@@ -169,7 +173,7 @@ func reconcileDelete(ctx context.Context, log logr.Logger, client client.Client,
 	// Cluster is deleted so remove the finalizer.
 	controllerutil.RemoveFinalizer(openStackCluster, infrav1.ClusterFinalizer)
-	log.Info("Reconciled Cluster delete successfully")
+	scope.Logger.Info("Reconciled Cluster delete successfully")
 	if err := patchHelper.Patch(ctx, openStackCluster); err != nil {
 		return ctrl.Result{}, err
 	}
@@ -185,12 +189,12 @@ func contains(arr []string, target string) bool {
 	return false
 }
 
-func deleteBastion(log logr.Logger, osProviderClient *gophercloud.ProviderClient, clientOpts *clientconfig.ClientOpts, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
-	computeService, err := compute.NewService(osProviderClient, clientOpts, log)
+func deleteBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
+	computeService, err := compute.NewService(scope)
 	if err != nil {
 		return err
 	}
-	networkingService, err := networking.NewService(osProviderClient, clientOpts, log)
+	networkingService, err := networking.NewService(scope)
 	if err != nil {
 		return err
 	}
@@ -235,8 +239,8 @@ func deleteBastion(log logr.Logger, osProviderClient *gophercloud.ProviderClient
 	return nil
 }
 
-func reconcileNormal(ctx context.Context, log logr.Logger, client client.Client, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
-	log.Info("Reconciling Cluster")
+func reconcileNormal(ctx context.Context, scope *scope.Scope, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
+	scope.Logger.Info("Reconciling Cluster")
 
 	// If the OpenStackCluster doesn't have our finalizer, add it.
 	controllerutil.AddFinalizer(openStackCluster, infrav1.ClusterFinalizer)
@@ -245,22 +249,17 @@ func reconcileNormal(ctx context.Context, log logr.Logger, client client.Client,
 		return reconcile.Result{}, err
 	}
 
-	osProviderClient, clientOpts, err := provider.NewClientFromCluster(ctx, client, openStackCluster)
-	if err != nil {
-		return reconcile.Result{}, err
-	}
-
-	computeService, err := compute.NewService(osProviderClient, clientOpts, log)
+	computeService, err := compute.NewService(scope)
 	if err != nil {
 		return reconcile.Result{}, err
 	}
 
-	err = reconcileNetworkComponents(log, osProviderClient, clientOpts, cluster, openStackCluster)
+	err = reconcileNetworkComponents(scope, cluster, openStackCluster)
 	if err != nil {
 		return reconcile.Result{}, err
 	}
 
-	if err = reconcileBastion(log, osProviderClient, clientOpts, cluster, openStackCluster); err != nil {
+	if err = reconcileBastion(scope, cluster, openStackCluster); err != nil {
 		return reconcile.Result{}, err
 	}
 
@@ -292,18 +291,18 @@ func reconcileNormal(ctx context.Context, log logr.Logger, client client.Client,
 	openStackCluster.Status.Ready = true
 	openStackCluster.Status.FailureMessage = nil
 	openStackCluster.Status.FailureReason = nil
-	log.Info("Reconciled Cluster create successfully")
+	scope.Logger.Info("Reconciled Cluster create successfully")
 	return reconcile.Result{}, nil
 }
 
-func reconcileBastion(log logr.Logger, osProviderClient *gophercloud.ProviderClient, clientOpts *clientconfig.ClientOpts, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
-	log.Info("Reconciling Bastion")
+func reconcileBastion(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
+	scope.Logger.Info("Reconciling Bastion")
 
 	if openStackCluster.Spec.Bastion == nil || !openStackCluster.Spec.Bastion.Enabled {
-		return deleteBastion(log, osProviderClient, clientOpts, cluster, openStackCluster)
+		return deleteBastion(scope, cluster, openStackCluster)
 	}
 
-	computeService, err := compute.NewService(osProviderClient, clientOpts, log)
+	computeService, err := compute.NewService(scope)
 	if err != nil {
 		return err
 	}
@@ -326,7 +325,7 @@ func reconcileBastion(log logr.Logger, osProviderClient *gophercloud.ProviderCli
 		return errors.Errorf("failed to reconcile bastion: %v", err)
 	}
 
-	networkingService, err := networking.NewService(osProviderClient, clientOpts, log)
+	networkingService, err := networking.NewService(scope)
 	if err != nil {
 		return err
 	}
@@ -357,15 +356,15 @@ func reconcileBastion(log logr.Logger, osProviderClient *gophercloud.ProviderCli
 	return nil
 }
 
-func reconcileNetworkComponents(log logr.Logger, osProviderClient *gophercloud.ProviderClient, clientOpts *clientconfig.ClientOpts, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
+func reconcileNetworkComponents(scope *scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
 	clusterName := fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name)
 
-	networkingService, err := networking.NewService(osProviderClient, clientOpts, log)
+	networkingService, err := networking.NewService(scope)
 	if err != nil {
 		return err
 	}
 
-	log.Info("Reconciling network components")
+	scope.Logger.Info("Reconciling network components")
 
 	err = networkingService.ReconcileExternalNetwork(openStackCluster)
 	if err != nil {
@@ -374,7 +373,7 @@ func reconcileNetworkComponents(log logr.Logger, osProviderClient *gophercloud.P
 	}
 
 	if openStackCluster.Spec.NodeCIDR == "" {
-		log.V(4).Info("No need to reconcile network, searching network and subnet instead")
+		scope.Logger.V(4).Info("No need to reconcile network, searching network and subnet instead")
 
 		netOpts := openStackCluster.Spec.Network.ToListOpt()
 		networkList, err := networkingService.GetNetworksByFilter(&netOpts)
@@ -450,7 +449,7 @@ func reconcileNetworkComponents(log logr.Logger, osProviderClient *gophercloud.P
 	}
 
 	if openStackCluster.Spec.ManagedAPIServerLoadBalancer {
-		loadBalancerService, err := loadbalancer.NewService(osProviderClient, clientOpts, log)
+		loadBalancerService, err := loadbalancer.NewService(scope)
 		if err != nil {
 			return err
 		}
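The machine controller below follows the same shape as the cluster controller above: the Gophercloud provider client is created once in Reconcile and bundled with its options and the logger into a `scope.Scope`, instead of being rebuilt inside each helper. Note the behavioral nuance this implies: the client is now constructed (and the cloud authenticated against) even on paths that previously returned before reaching `provider.NewClientFromCluster`/`NewClientFromMachine`. A minimal sketch of the bundling step, assuming only the `Scope` fields introduced by this patch (the helper name and package placement are illustrative, not part of the diff):

```go
package controllers // illustrative placement

import (
	"github.com/go-logr/logr"
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/utils/openstack/clientconfig"

	"sigs.k8s.io/cluster-api-provider-openstack/pkg/scope"
)

// buildScope bundles the per-reconcile dependencies once, mirroring what
// Reconcile now does before dispatching to reconcileNormal/reconcileDelete.
func buildScope(providerClient *gophercloud.ProviderClient, clientOpts *clientconfig.ClientOpts, log logr.Logger) *scope.Scope {
	return &scope.Scope{
		ProviderClient:     providerClient,
		ProviderClientOpts: clientOpts,
		Logger:             log,
	}
}
```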
diff --git a/controllers/openstackmachine_controller.go b/controllers/openstackmachine_controller.go
index a84e5379e9..37e3d609ad 100644
--- a/controllers/openstackmachine_controller.go
+++ b/controllers/openstackmachine_controller.go
@@ -24,8 +24,6 @@ import (
 	"time"
 
 	"github.com/go-logr/logr"
-	"github.com/gophercloud/gophercloud"
-	"github.com/gophercloud/utils/openstack/clientconfig"
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -46,6 +44,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/event"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1"
@@ -53,6 +52,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/loadbalancer"
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking"
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/provider"
+	"sigs.k8s.io/cluster-api-provider-openstack/pkg/scope"
 )
 
 // OpenStackMachineReconciler reconciles a OpenStackMachine object.
@@ -140,13 +140,24 @@ func (r *OpenStackMachineReconciler) Reconcile(ctx context.Context, req ctrl.Req
 		}
 	}()
 
+	osProviderClient, clientOpts, err := provider.NewClientFromMachine(ctx, r.Client, openStackMachine)
+	if err != nil {
+		return reconcile.Result{}, err
+	}
+
+	scope := &scope.Scope{
+		ProviderClient:     osProviderClient,
+		ProviderClientOpts: clientOpts,
+		Logger:             log,
+	}
+
 	// Handle deleted machines
 	if !openStackMachine.DeletionTimestamp.IsZero() {
-		return r.reconcileDelete(ctx, log, patchHelper, cluster, infraCluster, machine, openStackMachine)
+		return r.reconcileDelete(ctx, scope, patchHelper, cluster, infraCluster, machine, openStackMachine)
 	}
 
 	// Handle non-deleted clusters
-	return r.reconcileNormal(ctx, log, patchHelper, cluster, infraCluster, machine, openStackMachine)
+	return r.reconcileNormal(ctx, scope, patchHelper, cluster, infraCluster, machine, openStackMachine)
 }
 
 func (r *OpenStackMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
@@ -186,28 +197,23 @@ func (r *OpenStackMachineReconciler) SetupWithManager(ctx context.Context, mgr c
 		Complete(r)
 }
 
-func (r *OpenStackMachineReconciler) reconcileDelete(ctx context.Context, logger logr.Logger, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (ctrl.Result, error) {
-	logger.Info("Reconciling Machine delete")
+func (r *OpenStackMachineReconciler) reconcileDelete(ctx context.Context, scope *scope.Scope, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (ctrl.Result, error) {
+	scope.Logger.Info("Reconciling Machine delete")
 
 	clusterName := fmt.Sprintf("%s-%s", cluster.ObjectMeta.Namespace, cluster.Name)
 
-	osProviderClient, clientOpts, err := provider.NewClientFromMachine(ctx, r.Client, openStackMachine)
-	if err != nil {
-		return ctrl.Result{}, err
-	}
-
-	computeService, err := compute.NewService(osProviderClient, clientOpts, logger)
+	computeService, err := compute.NewService(scope)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
 
-	networkingService, err := networking.NewService(osProviderClient, clientOpts, logger)
+	networkingService, err := networking.NewService(scope)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
 
 	if openStackCluster.Spec.ManagedAPIServerLoadBalancer {
-		loadBalancerService, err := loadbalancer.NewService(osProviderClient, clientOpts, logger)
+		loadBalancerService, err := loadbalancer.NewService(scope)
 		if err != nil {
 			return ctrl.Result{}, err
 		}
@@ -226,7 +232,7 @@ func (r *OpenStackMachineReconciler) reconcileDelete(ctx context.Context, logger
 	if instanceStatus != nil {
 		instanceNS, err := instanceStatus.NetworkStatus()
 		if err != nil {
-			handleUpdateMachineError(logger, openStackMachine, errors.Errorf("error getting network status for OpenStack instance %s with ID %s: %v", instanceStatus.Name(), instanceStatus.ID(), err))
+			handleUpdateMachineError(scope.Logger, openStackMachine, errors.Errorf("error getting network status for OpenStack instance %s with ID %s: %v", instanceStatus.Name(), instanceStatus.ID(), err))
 			return ctrl.Result{}, nil
 		}
 
@@ -234,7 +240,7 @@ func (r *OpenStackMachineReconciler) reconcileDelete(ctx context.Context, logger
 		for _, address := range addresses {
 			if address.Type == corev1.NodeExternalIP {
 				if err = networkingService.DeleteFloatingIP(openStackMachine, address.Address); err != nil {
-					handleUpdateMachineError(logger, openStackMachine, errors.Errorf("error deleting Openstack floating IP: %v", err))
+					handleUpdateMachineError(scope.Logger, openStackMachine, errors.Errorf("error deleting Openstack floating IP: %v", err))
 					return ctrl.Result{}, nil
 				}
 			}
@@ -243,22 +249,22 @@ func (r *OpenStackMachineReconciler) reconcileDelete(ctx context.Context, logger
 	}
 
 	if err := computeService.DeleteInstance(openStackMachine, &openStackMachine.Spec, openStackMachine.Name, instanceStatus); err != nil {
-		handleUpdateMachineError(logger, openStackMachine, errors.Errorf("error deleting OpenStack instance %s with ID %s: %v", instanceStatus.Name(), instanceStatus.ID(), err))
+		handleUpdateMachineError(scope.Logger, openStackMachine, errors.Errorf("error deleting OpenStack instance %s with ID %s: %v", instanceStatus.Name(), instanceStatus.ID(), err))
 		return ctrl.Result{}, nil
 	}
 
 	controllerutil.RemoveFinalizer(openStackMachine, infrav1.MachineFinalizer)
-	logger.Info("Reconciled Machine delete successfully")
+	scope.Logger.Info("Reconciled Machine delete successfully")
 	if err := patchHelper.Patch(ctx, openStackMachine); err != nil {
 		return ctrl.Result{}, err
 	}
 	return ctrl.Result{}, nil
 }
 
-func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, logger logr.Logger, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (_ ctrl.Result, reterr error) {
+func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope *scope.Scope, patchHelper *patch.Helper, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (_ ctrl.Result, reterr error) {
 	// If the OpenStackMachine is in an error state, return early.
 	if openStackMachine.Status.FailureReason != nil || openStackMachine.Status.FailureMessage != nil {
-		logger.Info("Error state detected, skipping reconciliation")
+		scope.Logger.Info("Error state detected, skipping reconciliation")
 		return ctrl.Result{}, nil
 	}
 
@@ -270,47 +276,42 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, logger
 	}
 
 	if !cluster.Status.InfrastructureReady {
-		logger.Info("Cluster infrastructure is not ready yet, requeuing machine")
+		scope.Logger.Info("Cluster infrastructure is not ready yet, requeuing machine")
 		return ctrl.Result{RequeueAfter: waitForClusterInfrastructureReadyDuration}, nil
 	}
 
 	// Make sure bootstrap data is available and populated.
 	if machine.Spec.Bootstrap.DataSecretName == nil {
-		logger.Info("Bootstrap data secret reference is not yet available")
+		scope.Logger.Info("Bootstrap data secret reference is not yet available")
 		return ctrl.Result{}, nil
 	}
 
 	userData, err := r.getBootstrapData(ctx, machine, openStackMachine)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
-	logger.Info("Reconciling Machine")
+	scope.Logger.Info("Reconciling Machine")
 
 	clusterName := fmt.Sprintf("%s-%s", cluster.ObjectMeta.Namespace, cluster.Name)
 
-	osProviderClient, clientOpts, err := provider.NewClientFromMachine(ctx, r.Client, openStackMachine)
-	if err != nil {
-		return ctrl.Result{}, err
-	}
-
-	computeService, err := compute.NewService(osProviderClient, clientOpts, logger)
+	computeService, err := compute.NewService(scope)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
 
-	networkingService, err := networking.NewService(osProviderClient, clientOpts, logger)
+	networkingService, err := networking.NewService(scope)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
 
-	instanceStatus, err := r.getOrCreate(logger, cluster, openStackCluster, machine, openStackMachine, computeService, userData)
+	instanceStatus, err := r.getOrCreate(scope.Logger, cluster, openStackCluster, machine, openStackMachine, computeService, userData)
 	if err != nil {
-		handleUpdateMachineError(logger, openStackMachine, errors.Errorf("OpenStack instance cannot be created: %v", err))
+		handleUpdateMachineError(scope.Logger, openStackMachine, errors.Errorf("OpenStack instance cannot be created: %v", err))
 		return ctrl.Result{}, err
 	}
 
 	// Set an error message if we couldn't find the instance.
 	if instanceStatus == nil {
-		handleUpdateMachineError(logger, openStackMachine, errors.New("OpenStack instance cannot be found"))
+		handleUpdateMachineError(scope.Logger, openStackMachine, errors.New("OpenStack instance cannot be found"))
 		return ctrl.Result{}, nil
 	}
 
@@ -324,7 +325,7 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, logger
 	instanceNS, err := instanceStatus.NetworkStatus()
 	if err != nil {
-		handleUpdateMachineError(logger, openStackMachine, errors.Errorf("Unable to get network status for OpenStack instance %s with ID %s: %v", instanceStatus.Name(), instanceStatus.ID(), err))
+		handleUpdateMachineError(scope.Logger, openStackMachine, errors.Errorf("Unable to get network status for OpenStack instance %s with ID %s: %v", instanceStatus.Name(), instanceStatus.ID(), err))
 		return ctrl.Result{}, nil
 	}
 
@@ -333,27 +334,27 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, logger
 	switch instanceStatus.State() {
 	case infrav1.InstanceStateActive:
-		logger.Info("Machine instance is ACTIVE", "instance-id", instanceStatus.ID())
+		scope.Logger.Info("Machine instance is ACTIVE", "instance-id", instanceStatus.ID())
 		openStackMachine.Status.Ready = true
 	case infrav1.InstanceStateError:
 		// Error is unexpected, thus we report error and never retry
-		handleUpdateMachineError(logger, openStackMachine, errors.Errorf("OpenStack instance state %q is unexpected", instanceStatus.State()))
+		handleUpdateMachineError(scope.Logger, openStackMachine, errors.Errorf("OpenStack instance state %q is unexpected", instanceStatus.State()))
 		return ctrl.Result{}, nil
 	case infrav1.InstanceStateDeleted:
 		// we should avoid further actions for DELETED VM
-		logger.Info("Instance state is DELETED, no actions")
+		scope.Logger.Info("Instance state is DELETED, no actions")
 		return ctrl.Result{}, nil
 	default:
 		// The other state is normal (for example, migrating, shutoff) but we don't want to proceed until it's ACTIVE
 		// due to potential conflict or unexpected actions
-		logger.Info("Waiting for instance to become ACTIVE", "instance-id", instanceStatus.ID(), "status", instanceStatus.State())
+		scope.Logger.Info("Waiting for instance to become ACTIVE", "instance-id", instanceStatus.ID(), "status", instanceStatus.State())
 		return ctrl.Result{RequeueAfter: waitForInstanceBecomeActiveToReconcile}, nil
 	}
 
 	if openStackCluster.Spec.ManagedAPIServerLoadBalancer {
-		err = r.reconcileLoadBalancerMember(logger, osProviderClient, clientOpts, openStackCluster, machine, openStackMachine, instanceNS, clusterName)
+		err = r.reconcileLoadBalancerMember(scope, openStackCluster, machine, openStackMachine, instanceNS, clusterName)
 		if err != nil {
-			handleUpdateMachineError(logger, openStackMachine, errors.Errorf("LoadBalancerMember cannot be reconciled: %v", err))
+			handleUpdateMachineError(scope.Logger, openStackMachine, errors.Errorf("LoadBalancerMember cannot be reconciled: %v", err))
 			return ctrl.Result{}, nil
 		}
 	} else if util.IsControlPlaneMachine(machine) && !openStackCluster.Spec.DisableAPIServerFloatingIP {
@@ -363,23 +364,23 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, logger
 		}
 		fp, err := networkingService.GetOrCreateFloatingIP(openStackMachine, openStackCluster, clusterName, floatingIPAddress)
 		if err != nil {
-			handleUpdateMachineError(logger, openStackMachine, errors.Errorf("Floating IP cannot be got or created: %v", err))
+			handleUpdateMachineError(scope.Logger, openStackMachine, errors.Errorf("Floating IP cannot be got or created: %v", err))
 			return ctrl.Result{}, nil
 		}
 		port, err := computeService.GetManagementPort(openStackCluster, instanceStatus)
 		if err != nil {
 			err = errors.Errorf("getting management port for control plane machine %s: %v", machine.Name, err)
-			handleUpdateMachineError(logger, openStackMachine, err)
+			handleUpdateMachineError(scope.Logger, openStackMachine, err)
 			return ctrl.Result{}, nil
 		}
 		err = networkingService.AssociateFloatingIP(openStackMachine, fp, port.ID)
 		if err != nil {
-			handleUpdateMachineError(logger, openStackMachine, errors.Errorf("Floating IP cannot be associated: %v", err))
+			handleUpdateMachineError(scope.Logger, openStackMachine, errors.Errorf("Floating IP cannot be associated: %v", err))
 			return ctrl.Result{}, nil
 		}
 	}
 
-	logger.Info("Reconciled Machine create successfully")
+	scope.Logger.Info("Reconciled Machine create successfully")
 	return ctrl.Result{}, nil
 }
 
@@ -408,9 +409,9 @@ func handleUpdateMachineError(logger logr.Logger, openstackMachine *infrav1.Open
 	logger.Error(fmt.Errorf(string(err)), message.Error())
 }
 
-func (r *OpenStackMachineReconciler) reconcileLoadBalancerMember(logger logr.Logger, osProviderClient *gophercloud.ProviderClient, clientOpts *clientconfig.ClientOpts, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, instanceNS *compute.InstanceNetworkStatus, clusterName string) error {
+func (r *OpenStackMachineReconciler) reconcileLoadBalancerMember(scope *scope.Scope, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, instanceNS *compute.InstanceNetworkStatus, clusterName string) error {
 	ip := instanceNS.IP(openStackCluster.Status.Network.Name)
-	loadbalancerService, err := loadbalancer.NewService(osProviderClient, clientOpts, logger)
+	loadbalancerService, err := loadbalancer.NewService(scope)
 	if err != nil {
 		return err
 	}
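The service packages below absorb the other half of the change: every `NewService` constructor now takes the `Scope` and derives its regional Gophercloud service clients from `scope.ProviderClient` and `scope.ProviderClientOpts.RegionName`, while `handleUpdateMachineError` keeps its bare `logr.Logger` parameter, so callers pass `scope.Logger`. A sketch of the derivation pattern, using the real `openstack.NewComputeV2` signature but an illustrative function name and package placement:

```go
package compute // illustrative placement

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack"

	"sigs.k8s.io/cluster-api-provider-openstack/pkg/scope"
)

// newComputeClient shows how a service derives a regional API client from the
// scope instead of from separate provider-client/options/logger parameters.
func newComputeClient(s *scope.Scope) (*gophercloud.ServiceClient, error) {
	client, err := openstack.NewComputeV2(s.ProviderClient, gophercloud.EndpointOpts{
		Region: s.ProviderClientOpts.RegionName,
	})
	if err != nil {
		return nil, fmt.Errorf("failed to create compute service client: %v", err)
	}
	return client, nil
}
```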
diff --git a/pkg/cloud/services/compute/instance.go b/pkg/cloud/services/compute/instance.go
index 6e1ab64ce6..d170287888 100644
--- a/pkg/cloud/services/compute/instance.go
+++ b/pkg/cloud/services/compute/instance.go
@@ -217,7 +217,7 @@ func (s *Service) createInstance(eventObject runtime.Object, clusterName string,
 		}
 		if err := s.deletePorts(eventObject, portList); err != nil {
-			s.logger.V(4).Error(err, "failed to clean up ports after failure", "cluster", clusterName, "machine", instanceSpec.Name)
+			s.scope.Logger.V(4).Error(err, "failed to clean up ports after failure", "cluster", clusterName, "machine", instanceSpec.Name)
 		}
 	}()
 
@@ -381,7 +381,7 @@ func (s *Service) getOrCreateRootVolume(eventObject runtime.Object, instanceSpec
 		return nil, fmt.Errorf("exected to find volume %s with size %d; found size %d", name, size, volume.Size)
 	}
 
-	s.logger.Info("using existing root volume %s", name)
+	s.scope.Logger.Info("using existing root volume %s", name)
 	return volume, nil
 }
 
@@ -596,7 +596,7 @@ func (s *Service) DeleteInstance(eventObject runtime.Object, openStackMachineSpe
 			return nil
 		}
 
-		s.logger.Info("deleting dangling root volume %s(%s)", volume.Name, volume.ID)
+		s.scope.Logger.Info("deleting dangling root volume %s(%s)", volume.Name, volume.ID)
 		return s.computeService.DeleteVolume(volume.ID, volumes.DeleteOpts{})
 	}
 
@@ -721,7 +721,7 @@ func (s *Service) GetInstanceStatus(resourceID string) (instance *InstanceStatus
 		return nil, fmt.Errorf("get server %q detail failed: %v", resourceID, err)
 	}
 
-	return &InstanceStatus{server, s.logger}, nil
+	return &InstanceStatus{server, s.scope.Logger}, nil
 }
 
 func (s *Service) GetInstanceStatusByName(eventObject runtime.Object, name string) (instance *InstanceStatus, err error) {
@@ -748,7 +748,7 @@ func (s *Service) GetInstanceStatusByName(eventObject runtime.Object, name strin
 	// Return the first returned server, if any
 	for i := range serverList {
-		return &InstanceStatus{&serverList[i], s.logger}, nil
+		return &InstanceStatus{&serverList[i], s.scope.Logger}, nil
 	}
 	return nil, nil
 }
diff --git a/pkg/cloud/services/compute/instance_test.go b/pkg/cloud/services/compute/instance_test.go
index 5543f4a757..cccb1418c6 100644
--- a/pkg/cloud/services/compute/instance_test.go
+++ b/pkg/cloud/services/compute/instance_test.go
@@ -49,6 +49,7 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking"
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking/mock_networking"
+	"sigs.k8s.io/cluster-api-provider-openstack/pkg/scope"
 )
 
 type gomegaMockMatcher struct {
@@ -496,10 +497,12 @@ func TestService_getImageID(t *testing.T) {
 			tt.expect(mockComputeClient.EXPECT())
 
 			s := Service{
-				projectID:         "",
+				projectID: "",
+				scope: &scope.Scope{
+					Logger: logr.Discard(),
+				},
 				computeService:    mockComputeClient,
 				networkingService: &networking.Service{},
-				logger:            logr.Discard(),
 			}
 
 			got, err := s.getImageID(tt.imageUUID, tt.imageName)
@@ -1116,12 +1119,14 @@ func TestService_CreateInstance(t *testing.T) {
 			tt.expect(computeRecorder, networkRecorder)
 
 			s := Service{
-				projectID:      "",
+				projectID: "",
+				scope: &scope.Scope{
+					Logger: logr.Discard(),
+				},
 				computeService: mockComputeClient,
 				networkingService: networking.NewTestService(
 					"", mockNetworkClient, logr.Discard(),
 				),
-				logger: logr.Discard(),
 			}
 
 			// Call CreateInstance with a reduced retry interval to speed up the test
 			_, err := s.createInstanceImpl(tt.getOpenStackCluster(), tt.getMachine(), tt.getOpenStackMachine(), "cluster-name", "user-data", time.Second)
@@ -1238,12 +1243,14 @@ func TestService_DeleteInstance(t *testing.T) {
 			tt.expect(computeRecorder, networkRecorder)
 
 			s := Service{
-				projectID:      "",
+				projectID: "",
+				scope: &scope.Scope{
+					Logger: logr.Discard(),
+				},
 				computeService: mockComputeClient,
 				networkingService: networking.NewTestService(
 					"", mockNetworkClient, logr.Discard(),
 				),
-				logger: logr.Discard(),
 			}
 
 			if err := s.DeleteInstance(tt.eventObject, tt.getOpenStackMachineSpec(), instanceName, tt.getInstanceStatus()); (err != nil) != tt.wantErr {
 				t.Errorf("Service.DeleteInstance() error = %v, wantErr %v", err, tt.wantErr)
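In these tests only the `Logger` field of the scope is populated; `ProviderClient` and `ProviderClientOpts` stay nil because every OpenStack call goes through the mocked clients. A hypothetical helper — not part of this diff, which inlines the literal at each site — would capture the idiom:

```go
package compute // illustrative placement

import (
	"github.com/go-logr/logr"

	"sigs.k8s.io/cluster-api-provider-openstack/pkg/scope"
)

// testScope returns a Scope for unit tests: only the Logger is set, so an
// accidental call through the provider client fails loudly with a nil pointer
// dereference instead of silently reaching a real cloud.
func testScope() *scope.Scope {
	return &scope.Scope{Logger: logr.Discard()}
}
```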
service: %v", err) } return &Service{ projectID: projectID, + scope: scope, computeService: computeService, networkingService: networkingService, - logger: logger, }, nil } diff --git a/pkg/cloud/services/loadbalancer/loadbalancer.go b/pkg/cloud/services/loadbalancer/loadbalancer.go index e343a74bfd..42b53469f2 100644 --- a/pkg/cloud/services/loadbalancer/loadbalancer.go +++ b/pkg/cloud/services/loadbalancer/loadbalancer.go @@ -44,7 +44,7 @@ const loadBalancerProvisioningStatusActive = "ACTIVE" func (s *Service) ReconcileLoadBalancer(openStackCluster *infrav1.OpenStackCluster, clusterName string, apiServerPort int) error { loadBalancerName := getLoadBalancerName(clusterName) - s.logger.Info("Reconciling load balancer", "name", loadBalancerName) + s.scope.Logger.Info("Reconciling load balancer", "name", loadBalancerName) var fixedIPAddress string switch { @@ -119,7 +119,7 @@ func (s *Service) getOrCreateLoadBalancer(openStackCluster *infrav1.OpenStackClu return lb, nil } - s.logger.Info(fmt.Sprintf("Creating load balancer in subnet: %q", subnetID), "name", loadBalancerName) + s.scope.Logger.Info(fmt.Sprintf("Creating load balancer in subnet: %q", subnetID), "name", loadBalancerName) lbCreateOpts := loadbalancers.CreateOpts{ Name: loadBalancerName, @@ -152,7 +152,7 @@ func (s *Service) getOrCreateListener(openStackCluster *infrav1.OpenStackCluster return listener, nil } - s.logger.Info("Creating load balancer listener", "name", listenerName, "lb-id", lbID) + s.scope.Logger.Info("Creating load balancer listener", "name", listenerName, "lb-id", lbID) listenerCreateOpts := listeners.CreateOpts{ Name: listenerName, @@ -190,7 +190,7 @@ func (s *Service) getOrCreatePool(openStackCluster *infrav1.OpenStackCluster, po return pool, nil } - s.logger.Info(fmt.Sprintf("Creating load balancer pool for listener %q", listenerID), "name", poolName, "lb-id", lbID) + s.scope.Logger.Info(fmt.Sprintf("Creating load balancer pool for listener %q", listenerID), "name", poolName, "lb-id", lbID) poolCreateOpts := pools.CreateOpts{ Name: poolName, @@ -223,7 +223,7 @@ func (s *Service) getOrCreateMonitor(openStackCluster *infrav1.OpenStackCluster, return nil } - s.logger.Info(fmt.Sprintf("Creating load balancer monitor for pool %q", poolID), "name", monitorName, "lb-id", lbID) + s.scope.Logger.Info(fmt.Sprintf("Creating load balancer monitor for pool %q", poolID), "name", monitorName, "lb-id", lbID) monitorCreateOpts := monitors.CreateOpts{ Name: monitorName, @@ -264,7 +264,7 @@ func (s *Service) ReconcileLoadBalancerMember(openStackCluster *infrav1.OpenStac } loadBalancerName := getLoadBalancerName(clusterName) - s.logger.Info("Reconciling load balancer member", "name", loadBalancerName) + s.scope.Logger.Info("Reconciling load balancer member", "name", loadBalancerName) lbID := openStackCluster.Status.Network.APIServerLoadBalancer.ID portList := []int{int(openStackCluster.Spec.ControlPlaneEndpoint.Port)} @@ -293,7 +293,7 @@ func (s *Service) ReconcileLoadBalancerMember(openStackCluster *infrav1.OpenStac continue } - s.logger.Info("Deleting load balancer member (because the IP of the machine changed)", "name", name) + s.scope.Logger.Info("Deleting load balancer member (because the IP of the machine changed)", "name", name) // lb member changed so let's delete it so we can create it again with the correct IP err = s.waitForLoadBalancerActive(lbID) @@ -309,7 +309,7 @@ func (s *Service) ReconcileLoadBalancerMember(openStackCluster *infrav1.OpenStac } } - s.logger.Info("Creating load balancer member", "name", name) + 
s.scope.Logger.Info("Creating load balancer member", "name", name) // if we got to this point we should either create or re-create the lb member lbMemberOpts := pools.CreateMemberOpts{ @@ -363,7 +363,7 @@ func (s *Service) DeleteLoadBalancer(openStackCluster *infrav1.OpenStackCluster, deleteOpts := loadbalancers.DeleteOpts{ Cascade: true, } - s.logger.Info("Deleting load balancer", "name", loadBalancerName, "cascade", deleteOpts.Cascade) + s.scope.Logger.Info("Deleting load balancer", "name", loadBalancerName, "cascade", deleteOpts.Cascade) err = s.loadbalancerClient.DeleteLoadBalancer(lb.ID, deleteOpts) if err != nil && !capoerrors.IsNotFound(err) { record.Warnf(openStackCluster, "FailedDeleteLoadBalancer", "Failed to delete load balancer %s with id %s: %v", lb.Name, lb.ID, err) @@ -402,7 +402,7 @@ func (s *Service) DeleteLoadBalancerMember(openStackCluster *infrav1.OpenStackCl return err } if pool == nil { - s.logger.Info("Load balancer pool does not exist", "name", lbPortObjectsName) + s.scope.Logger.Info("Load balancer pool does not exist", "name", lbPortObjectsName) continue } @@ -497,7 +497,7 @@ var backoff = wait.Backoff{ // Possible LoadBalancer states are documented here: https://docs.openstack.org/api-ref/load-balancer/v2/index.html#prov-status func (s *Service) waitForLoadBalancerActive(id string) error { - s.logger.Info("Waiting for load balancer", "id", id, "targetStatus", "ACTIVE") + s.scope.Logger.Info("Waiting for load balancer", "id", id, "targetStatus", "ACTIVE") return wait.ExponentialBackoff(backoff, func() (bool, error) { lb, err := s.loadbalancerClient.GetLoadBalancer(id) if err != nil { @@ -508,7 +508,7 @@ func (s *Service) waitForLoadBalancerActive(id string) error { } func (s *Service) waitForListener(id, target string) error { - s.logger.Info("Waiting for load balancer listener", "id", id, "targetStatus", target) + s.scope.Logger.Info("Waiting for load balancer listener", "id", id, "targetStatus", target) return wait.ExponentialBackoff(backoff, func() (bool, error) { _, err := s.loadbalancerClient.GetListener(id) if err != nil { diff --git a/pkg/cloud/services/loadbalancer/service.go b/pkg/cloud/services/loadbalancer/service.go index a692faabdb..a16d83c2a4 100644 --- a/pkg/cloud/services/loadbalancer/service.go +++ b/pkg/cloud/services/loadbalancer/service.go @@ -22,39 +22,39 @@ import ( "github.com/go-logr/logr" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" - "github.com/gophercloud/utils/openstack/clientconfig" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" ) // Service interfaces with the OpenStack Neutron LBaaS v2 API. type Service struct { projectID string + scope *scope.Scope loadbalancerClient LbClient networkingService *networking.Service - logger logr.Logger } // NewService returns an instance of the loadbalancer service. 
-func NewService(client *gophercloud.ProviderClient, clientOpts *clientconfig.ClientOpts, logger logr.Logger) (*Service, error) {
-	loadbalancerClient, err := openstack.NewLoadBalancerV2(client, gophercloud.EndpointOpts{
-		Region: clientOpts.RegionName,
+func NewService(scope *scope.Scope) (*Service, error) {
+	loadbalancerClient, err := openstack.NewLoadBalancerV2(scope.ProviderClient, gophercloud.EndpointOpts{
+		Region: scope.ProviderClientOpts.RegionName,
 	})
 	if err != nil {
 		return nil, fmt.Errorf("failed to create load balancer service client: %v", err)
 	}
 
-	networkingService, err := networking.NewService(client, clientOpts, logger)
+	networkingService, err := networking.NewService(scope)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create networking service: %v", err)
 	}
 
 	return &Service{
+		scope: scope,
 		loadbalancerClient: lbClient{
 			serviceClient: loadbalancerClient,
 		},
 		networkingService: networkingService,
-		logger:            logger,
 	}, nil
 }
 
@@ -62,9 +62,11 @@ func NewService(client *gophercloud.ProviderClient, clientOpts *clientconfig.Cli
 // It helps to mock the load balancer service in other packages.
 func NewLoadBalancerTestService(projectID string, lbClient LbClient, client *networking.Service, logger logr.Logger) *Service {
 	return &Service{
-		projectID:          projectID,
+		projectID: projectID,
+		scope: &scope.Scope{
+			Logger: logger,
+		},
 		loadbalancerClient: lbClient,
 		networkingService:  client,
-		logger:             logger,
 	}
 }
diff --git a/pkg/cloud/services/networking/floatingip.go b/pkg/cloud/services/networking/floatingip.go
index 8b13b3e9b9..7a06fd8a62 100644
--- a/pkg/cloud/services/networking/floatingip.go
+++ b/pkg/cloud/services/networking/floatingip.go
@@ -120,7 +120,7 @@ var backoff = wait.Backoff{
 }
 
 func (s *Service) AssociateFloatingIP(eventObject runtime.Object, fp *floatingips.FloatingIP, portID string) error {
-	s.logger.Info("Associating floating IP", "id", fp.ID, "ip", fp.FloatingIP)
+	s.scope.Logger.Info("Associating floating IP", "id", fp.ID, "ip", fp.FloatingIP)
 
 	if fp.PortID == portID {
 		record.Eventf(eventObject, "SuccessfulAssociateFloatingIP", "Floating IP %s already associated with port %s", fp.FloatingIP, portID)
@@ -152,11 +152,11 @@ func (s *Service) DisassociateFloatingIP(eventObject runtime.Object, ip string)
 		return err
 	}
 	if fip == nil || fip.FloatingIP == "" {
-		s.logger.Info("Floating IP not associated", "ip", ip)
+		s.scope.Logger.Info("Floating IP not associated", "ip", ip)
 		return nil
 	}
 
-	s.logger.Info("Disassociating floating IP", "id", fip.ID, "ip", fip.FloatingIP)
+	s.scope.Logger.Info("Disassociating floating IP", "id", fip.ID, "ip", fip.FloatingIP)
 
 	fpUpdateOpts := &floatingips.UpdateOpts{
 		PortID: nil,
@@ -178,7 +178,7 @@ func (s *Service) DisassociateFloatingIP(eventObject runtime.Object, ip string)
 }
 
 func (s *Service) waitForFloatingIP(id, target string) error {
-	s.logger.Info("Waiting for floating IP", "id", id, "targetStatus", target)
+	s.scope.Logger.Info("Waiting for floating IP", "id", id, "targetStatus", target)
 	return wait.ExponentialBackoff(backoff, func() (bool, error) {
 		fip, err := s.client.GetFloatingIP(id)
 		if err != nil {
diff --git a/pkg/cloud/services/networking/network.go b/pkg/cloud/services/networking/network.go
index 3221c0a48c..a83a568c30 100644
--- a/pkg/cloud/services/networking/network.go
+++ b/pkg/cloud/services/networking/network.go
@@ -74,7 +74,7 @@ func (s *Service) ReconcileExternalNetwork(openStackCluster *infrav1.OpenStackCl
 	case 0:
 		// Not finding an external network is fine
 		openStackCluster.Status.ExternalNetwork = &infrav1.Network{}
-		s.logger.Info("No external network found - proceeding with internal network only")
s.logger.Info("No external network found - proceeding with internal network only") + s.scope.Logger.Info("No external network found - proceeding with internal network only") return nil case 1: openStackCluster.Status.ExternalNetwork = &infrav1.Network{ @@ -82,7 +82,7 @@ func (s *Service) ReconcileExternalNetwork(openStackCluster *infrav1.OpenStackCl Name: networkList[0].Name, Tags: networkList[0].Tags, } - s.logger.Info("External network found", "network id", networkList[0].ID) + s.scope.Logger.Info("External network found", "network id", networkList[0].ID) return nil } return fmt.Errorf("found %d external networks, which should not happen", len(networkList)) @@ -90,7 +90,7 @@ func (s *Service) ReconcileExternalNetwork(openStackCluster *infrav1.OpenStackCl func (s *Service) ReconcileNetwork(openStackCluster *infrav1.OpenStackCluster, clusterName string) error { networkName := getNetworkName(clusterName) - s.logger.Info("Reconciling network", "name", networkName) + s.scope.Logger.Info("Reconciling network", "name", networkName) res, err := s.getNetworkByName(networkName) if err != nil { @@ -105,7 +105,7 @@ func (s *Service) ReconcileNetwork(openStackCluster *infrav1.OpenStackCluster, c Tags: res.Tags, } sInfo := fmt.Sprintf("Reuse Existing Network %s with id %s", res.Name, res.ID) - s.logger.V(6).Info(sInfo) + s.scope.Logger.V(6).Info(sInfo) return nil } @@ -169,12 +169,12 @@ func (s *Service) DeleteNetwork(openStackCluster *infrav1.OpenStackCluster, clus func (s *Service) ReconcileSubnet(openStackCluster *infrav1.OpenStackCluster, clusterName string) error { if openStackCluster.Status.Network == nil || openStackCluster.Status.Network.ID == "" { - s.logger.V(4).Info("No need to reconcile network components since no network exists.") + s.scope.Logger.V(4).Info("No need to reconcile network components since no network exists.") return nil } subnetName := getSubnetName(clusterName) - s.logger.Info("Reconciling subnet", "name", subnetName) + s.scope.Logger.Info("Reconciling subnet", "name", subnetName) subnetList, err := s.client.ListSubnet(subnets.ListOpts{ NetworkID: openStackCluster.Status.Network.ID, @@ -197,7 +197,7 @@ func (s *Service) ReconcileSubnet(openStackCluster *infrav1.OpenStackCluster, cl } } else if len(subnetList) == 1 { subnet = &subnetList[0] - s.logger.V(6).Info(fmt.Sprintf("Reuse existing subnet %s with id %s", subnetName, subnet.ID)) + s.scope.Logger.V(6).Info(fmt.Sprintf("Reuse existing subnet %s with id %s", subnetName, subnet.ID)) } openStackCluster.Status.Network.Subnet = &infrav1.Subnet{ diff --git a/pkg/cloud/services/networking/router.go b/pkg/cloud/services/networking/router.go index 3f6b556eea..442b41c9d9 100644 --- a/pkg/cloud/services/networking/router.go +++ b/pkg/cloud/services/networking/router.go @@ -32,20 +32,20 @@ import ( func (s *Service) ReconcileRouter(openStackCluster *infrav1.OpenStackCluster, clusterName string) error { if openStackCluster.Status.Network == nil || openStackCluster.Status.Network.ID == "" { - s.logger.V(3).Info("No need to reconcile router since no network exists.") + s.scope.Logger.V(3).Info("No need to reconcile router since no network exists.") return nil } if openStackCluster.Status.Network.Subnet == nil || openStackCluster.Status.Network.Subnet.ID == "" { - s.logger.V(4).Info("No need to reconcile router since no subnet exists.") + s.scope.Logger.V(4).Info("No need to reconcile router since no subnet exists.") return nil } if openStackCluster.Status.ExternalNetwork == nil || openStackCluster.Status.ExternalNetwork.ID == "" { - 
s.logger.V(3).Info("No need to create router, due to missing ExternalNetworkID.") + s.scope.Logger.V(3).Info("No need to create router, due to missing ExternalNetworkID.") return nil } routerName := getRouterName(clusterName) - s.logger.Info("Reconciling router", "name", routerName) + s.scope.Logger.Info("Reconciling router", "name", routerName) routerList, err := s.client.ListRouter(routers.ListOpts{ Name: routerName, @@ -67,7 +67,7 @@ func (s *Service) ReconcileRouter(openStackCluster *infrav1.OpenStackCluster, cl } } else { router = &routerList[0] - s.logger.V(6).Info(fmt.Sprintf("Reuse existing Router %s with id %s", routerName, router.ID)) + s.scope.Logger.V(6).Info(fmt.Sprintf("Reuse existing Router %s with id %s", routerName, router.ID)) } openStackCluster.Status.Network.Router = &infrav1.Router{ @@ -101,14 +101,14 @@ INTERFACE_LOOP: // ... and create a router interface for our subnet. if createInterface { - s.logger.V(4).Info("Creating RouterInterface", "routerID", router.ID, "subnetID", openStackCluster.Status.Network.Subnet.ID) + s.scope.Logger.V(4).Info("Creating RouterInterface", "routerID", router.ID, "subnetID", openStackCluster.Status.Network.Subnet.ID) routerInterface, err := s.client.AddRouterInterface(router.ID, routers.AddInterfaceOpts{ SubnetID: openStackCluster.Status.Network.Subnet.ID, }) if err != nil { return fmt.Errorf("unable to create router interface: %v", err) } - s.logger.V(4).Info("Created RouterInterface", "id", routerInterface.ID) + s.scope.Logger.V(4).Info("Created RouterInterface", "id", routerInterface.ID) } return nil } @@ -201,9 +201,9 @@ func (s *Service) DeleteRouter(openStackCluster *infrav1.OpenStackCluster, clust if !capoerrors.IsNotFound(err) { return fmt.Errorf("unable to remove router interface: %v", err) } - s.logger.V(4).Info("Router Interface already removed, nothing to do", "id", router.ID) + s.scope.Logger.V(4).Info("Router Interface already removed, nothing to do", "id", router.ID) } else { - s.logger.V(4).Info("Removed RouterInterface of Router", "id", router.ID) + s.scope.Logger.V(4).Info("Removed RouterInterface of Router", "id", router.ID) } } diff --git a/pkg/cloud/services/networking/securitygroups.go b/pkg/cloud/services/networking/securitygroups.go index ca9ba84228..c339a629a4 100644 --- a/pkg/cloud/services/networking/securitygroups.go +++ b/pkg/cloud/services/networking/securitygroups.go @@ -58,9 +58,9 @@ var defaultRules = []infrav1.SecurityGroupRule{ // ReconcileSecurityGroups reconcile the security groups. 
 func (s *Service) ReconcileSecurityGroups(openStackCluster *infrav1.OpenStackCluster, clusterName string) error {
-	s.logger.Info("Reconciling security groups", "cluster", clusterName)
+	s.scope.Logger.Info("Reconciling security groups", "cluster", clusterName)
 	if !openStackCluster.Spec.ManagedSecurityGroups {
-		s.logger.V(4).Info("No need to reconcile security groups", "cluster", clusterName)
+		s.scope.Logger.V(4).Info("No need to reconcile security groups", "cluster", clusterName)
 		return nil
 	}
 
@@ -501,16 +501,16 @@ func (s *Service) reconcileGroupRules(desired, observed infrav1.SecurityGroup) (
 		}
 	}
 
-	s.logger.V(4).Info("Deleting rules not needed anymore for group", "name", observed.Name, "amount", len(rulesToDelete))
+	s.scope.Logger.V(4).Info("Deleting rules not needed anymore for group", "name", observed.Name, "amount", len(rulesToDelete))
 	for _, rule := range rulesToDelete {
-		s.logger.V(6).Info("Deleting rule", "ruleID", rule.ID, "groupName", observed.Name)
+		s.scope.Logger.V(6).Info("Deleting rule", "ruleID", rule.ID, "groupName", observed.Name)
 		err := s.client.DeleteSecGroupRule(rule.ID)
 		if err != nil {
 			return infrav1.SecurityGroup{}, err
 		}
 	}
 
-	s.logger.V(4).Info("Creating new rules needed for group", "name", observed.Name, "amount", len(rulesToCreate))
+	s.scope.Logger.V(4).Info("Creating new rules needed for group", "name", observed.Name, "amount", len(rulesToCreate))
 	for _, rule := range rulesToCreate {
 		r := rule
 		r.SecurityGroupID = observed.ID
@@ -534,13 +534,13 @@ func (s *Service) createSecurityGroupIfNotExists(openStackCluster *infrav1.OpenS
 		return err
 	}
 	if secGroup == nil || secGroup.ID == "" {
-		s.logger.V(6).Info("Group doesn't exist, creating it.", "name", groupName)
+		s.scope.Logger.V(6).Info("Group doesn't exist, creating it.", "name", groupName)
 
 		createOpts := groups.CreateOpts{
 			Name:        groupName,
 			Description: "Cluster API managed group",
 		}
-		s.logger.V(6).Info("Creating group", "name", groupName)
+		s.scope.Logger.V(6).Info("Creating group", "name", groupName)
 
 		group, err := s.client.CreateSecGroup(createOpts)
 		if err != nil {
@@ -562,7 +562,7 @@ func (s *Service) createSecurityGroupIfNotExists(openStackCluster *infrav1.OpenS
 	}
 
 	sInfo := fmt.Sprintf("Reuse Existing SecurityGroup %s with %s", groupName, secGroup.ID)
-	s.logger.V(6).Info(sInfo)
+	s.scope.Logger.V(6).Info(sInfo)
 
 	return nil
 }
@@ -572,7 +572,7 @@ func (s *Service) getSecurityGroupByName(name string) (*infrav1.SecurityGroup, e
 		Name: name,
 	}
 
-	s.logger.V(6).Info("Attempting to fetch security group with", "name", name)
+	s.scope.Logger.V(6).Info("Attempting to fetch security group with", "name", name)
 	allGroups, err := s.client.ListSecGroup(opts)
 	if err != nil {
 		return &infrav1.SecurityGroup{}, err
@@ -604,7 +604,7 @@ func (s *Service) createRule(r infrav1.SecurityGroupRule) (infrav1.SecurityGroup
 		RemoteIPPrefix: r.RemoteIPPrefix,
 		SecGroupID:     r.SecurityGroupID,
 	}
-	s.logger.V(6).Info("Creating rule", "Description", r.Description, "Direction", dir, "PortRangeMin", r.PortRangeMin, "PortRangeMax", r.PortRangeMax, "Proto", proto, "etherType", etherType, "RemoteGroupID", r.RemoteGroupID, "RemoteIPPrefix", r.RemoteIPPrefix, "SecurityGroupID", r.SecurityGroupID)
+	s.scope.Logger.V(6).Info("Creating rule", "Description", r.Description, "Direction", dir, "PortRangeMin", r.PortRangeMin, "PortRangeMax", r.PortRangeMax, "Proto", proto, "etherType", etherType, "RemoteGroupID", r.RemoteGroupID, "RemoteIPPrefix", r.RemoteIPPrefix, "SecurityGroupID", r.SecurityGroupID)
 	rule, err := s.client.CreateSecGroupRule(createOpts)
 	if err != nil {
 		return infrav1.SecurityGroupRule{}, err
diff --git a/pkg/cloud/services/networking/service.go b/pkg/cloud/services/networking/service.go
index e12b167c2f..9732c8a458 100644
--- a/pkg/cloud/services/networking/service.go
+++ b/pkg/cloud/services/networking/service.go
@@ -24,11 +24,11 @@ import (
 	"github.com/gophercloud/gophercloud"
 	"github.com/gophercloud/gophercloud/openstack"
 	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags"
-	"github.com/gophercloud/utils/openstack/clientconfig"
 	"k8s.io/apimachinery/pkg/runtime"
 
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/provider"
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/record"
+	"sigs.k8s.io/cluster-api-provider-openstack/pkg/scope"
 )
 
 const (
@@ -41,32 +41,32 @@ const (
 // It will create a network related infrastructure for the cluster, like network, subnet, router, security groups.
 type Service struct {
 	projectID string
+	scope     *scope.Scope
 	client    NetworkClient
-	logger    logr.Logger
 }
 
 // NewService returns an instance of the networking service.
-func NewService(providerClient *gophercloud.ProviderClient, clientOpts *clientconfig.ClientOpts, logger logr.Logger) (*Service, error) {
-	serviceClient, err := openstack.NewNetworkV2(providerClient, gophercloud.EndpointOpts{
-		Region: clientOpts.RegionName,
+func NewService(scope *scope.Scope) (*Service, error) {
+	serviceClient, err := openstack.NewNetworkV2(scope.ProviderClient, gophercloud.EndpointOpts{
+		Region: scope.ProviderClientOpts.RegionName,
 	})
 	if err != nil {
 		return nil, fmt.Errorf("failed to create networking service providerClient: %v", err)
 	}
 
-	if clientOpts.AuthInfo == nil {
+	if scope.ProviderClientOpts.AuthInfo == nil {
 		return nil, fmt.Errorf("failed to get project id: authInfo must be set")
 	}
 
-	projectID, err := provider.GetProjectID(providerClient, clientOpts)
+	projectID, err := provider.GetProjectID(scope.ProviderClient, scope.ProviderClientOpts)
 	if err != nil {
 		return nil, fmt.Errorf("error retrieveing project id: %v", err)
 	}
 
 	return &Service{
 		projectID: projectID,
+		scope:     scope,
 		client:    networkClient{serviceClient},
-		logger:    logger,
 	}, nil
 }
 
@@ -74,8 +74,10 @@ func NewService(providerClient *gophercloud.ProviderClient, clientOpts *clientco
 func NewTestService(projectID string, client NetworkClient, logger logr.Logger) *Service {
 	return &Service{
 		projectID: projectID,
-		client:    client,
-		logger:    logger,
+		scope: &scope.Scope{
+			Logger: logger,
+		},
+		client: client,
 	}
 }
 
@@ -83,7 +85,7 @@ func NewTestService(projectID string, client NetworkClient, logger logr.Logger)
 // the value of resourceType must match one of the allowed constants: trunkResource or portResource.
 func (s *Service) replaceAllAttributesTags(eventObject runtime.Object, resourceType string, resourceID string, tags []string) error {
 	if len(tags) == 0 {
-		s.logger.Info("no tags provided to ReplaceAllAttributesTags", "resourceType", resourceType, "resourceID", resourceID)
+		s.scope.Logger.Info("no tags provided to ReplaceAllAttributesTags", "resourceType", resourceType, "resourceID", resourceID)
 		return nil
 	}
 	if resourceType != trunkResource && resourceType != portResource {
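The new package follows. For callers outside the controllers (say, a one-off debugging tool), a `Scope` could also be assembled from a standard `clouds.yaml` entry. This is a sketch under the assumption of a cloud entry named `mycloud`, and it simplifies region handling; the controllers themselves obtain the client and fully populated options via `provider.NewClientFromCluster`/`NewClientFromMachine` as shown above:

```go
package scopeutil // illustrative placement, not part of this PR

import (
	"fmt"

	"github.com/go-logr/logr"
	"github.com/gophercloud/utils/openstack/clientconfig"

	"sigs.k8s.io/cluster-api-provider-openstack/pkg/scope"
)

// NewScopeFromCloudsYAML authenticates against the "mycloud" entry in
// clouds.yaml and wraps the resulting provider client in a Scope.
func NewScopeFromCloudsYAML(log logr.Logger) (*scope.Scope, error) {
	opts := &clientconfig.ClientOpts{Cloud: "mycloud"}
	providerClient, err := clientconfig.AuthenticatedClient(opts)
	if err != nil {
		return nil, fmt.Errorf("failed to authenticate: %v", err)
	}
	return &scope.Scope{
		ProviderClient:     providerClient,
		ProviderClientOpts: opts,
		Logger:             log,
	}, nil
}
```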
diff --git a/pkg/scope/scope.go b/pkg/scope/scope.go
new file mode 100644
index 0000000000..a9ce8c5fd4
--- /dev/null
+++ b/pkg/scope/scope.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scope
+
+import (
+	"github.com/go-logr/logr"
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/utils/openstack/clientconfig"
+)
+
+// Scope is used to initialize Services from Controllers and includes the
+// common objects required for this.
+//
+// The Gophercloud ProviderClient and ClientOpts are required to create new
+// Gophercloud API Clients (e.g. for Networking/Neutron).
+//
+// The Logger includes context values such as the cluster name.
+type Scope struct {
+	ProviderClient     *gophercloud.ProviderClient
+	ProviderClientOpts *clientconfig.ClientOpts
+
+	Logger logr.Logger
+}
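Taken together: once a `Scope` exists, every service touched by this patch is constructed from it alone. A closing sketch of that construction path — the aggregate struct and constructor are illustrative; the PR itself keeps the services separate at each call site:

```go
package controllers // illustrative placement

import (
	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute"
	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/loadbalancer"
	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking"
	"sigs.k8s.io/cluster-api-provider-openstack/pkg/scope"
)

// clusterServices groups the three services a cluster reconcile may need.
type clusterServices struct {
	compute      *compute.Service
	networking   *networking.Service
	loadbalancer *loadbalancer.Service
}

// newClusterServices builds every service from the same Scope, which is the
// pattern each reconcile function in this diff now follows individually.
func newClusterServices(s *scope.Scope) (*clusterServices, error) {
	computeSvc, err := compute.NewService(s)
	if err != nil {
		return nil, err
	}
	networkingSvc, err := networking.NewService(s)
	if err != nil {
		return nil, err
	}
	lbSvc, err := loadbalancer.NewService(s)
	if err != nil {
		return nil, err
	}
	return &clusterServices{
		compute:      computeSvc,
		networking:   networkingSvc,
		loadbalancer: lbSvc,
	}, nil
}
```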