From d9d0d6fc0fdca528e038f55fa7ddf21e31ed4f54 Mon Sep 17 00:00:00 2001
From: Emilien Macchi
Date: Mon, 11 Dec 2023 12:43:42 -0500
Subject: [PATCH] WIP - move ports out of instance

---
 api/v1alpha7/conditions_consts.go           |   6 +
 api/v1alpha7/openstackmachine_types.go      |   4 +
 api/v1alpha7/types.go                       |   6 +
 controllers/openstackcluster_controller.go  |   2 +-
 controllers/openstackmachine_controller.go  | 100 +++--
 controllers/suite_test.go                   |   2 +-
 pkg/clients/networking.go                   |   5 +
 pkg/cloud/services/compute/instance.go      |  66 ++--
 pkg/cloud/services/compute/instance_test.go |  44 +--
 pkg/cloud/services/networking/port.go       | 217 +++++++++++
 pkg/cloud/services/networking/port_test.go  | 405 ++++++++++++++++++++
 pkg/utils/controller/controller.go          |  31 ++
 12 files changed, 791 insertions(+), 97 deletions(-)
 create mode 100644 pkg/utils/controller/controller.go

diff --git a/api/v1alpha7/conditions_consts.go b/api/v1alpha7/conditions_consts.go
index b54eade0b0..49fa89da8d 100644
--- a/api/v1alpha7/conditions_consts.go
+++ b/api/v1alpha7/conditions_consts.go
@@ -22,6 +22,10 @@ const (
 	// InstanceReadyCondition reports on current status of the OpenStack instance. Ready indicates the instance is in a Running state.
 	InstanceReadyCondition clusterv1.ConditionType = "InstanceReady"
 
+	// PortsReadyCondition reports on the current status of the ports. Ready indicates that all ports have been created
+	// successfully and are ready to be attached to the instance.
+	PortsReadyCondition clusterv1.ConditionType = "PortsReady"
+
 	// WaitingForClusterInfrastructureReason used when machine is waiting for cluster infrastructure to be ready before proceeding.
 	WaitingForClusterInfrastructureReason = "WaitingForClusterInfrastructure"
 	// WaitingForBootstrapDataReason used when machine is waiting for bootstrap data to be ready before proceeding.
@@ -42,6 +46,8 @@ const (
 	InstanceDeleteFailedReason = "InstanceDeleteFailed"
 	// OpenstackErrorReason used when there is an error communicating with OpenStack.
 	OpenStackErrorReason = "OpenStackError"
+	// PortsCreateFailedReason used when creating the ports failed.
+	PortsCreateFailedReason = "PortsCreateFailed"
 )
 
 const (
diff --git a/api/v1alpha7/openstackmachine_types.go b/api/v1alpha7/openstackmachine_types.go
index f5f4f698aa..c22ca58845 100644
--- a/api/v1alpha7/openstackmachine_types.go
+++ b/api/v1alpha7/openstackmachine_types.go
@@ -113,6 +113,10 @@ type OpenStackMachineStatus struct {
 
 	FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"`
 
+	// PortsStatus is the status of the ports associated with the machine.
+	// +optional
+	PortsStatus []PortStatus `json:"portsStatus,omitempty"`
+
 	// FailureMessage will be set in the event that there is a terminal problem
 	// reconciling the Machine and will contain a more verbose string suitable
 	// for logging and human consumption.
diff --git a/api/v1alpha7/types.go b/api/v1alpha7/types.go
index 3ef5305ff2..1052026ca2 100644
--- a/api/v1alpha7/types.go
+++ b/api/v1alpha7/types.go
@@ -128,6 +128,12 @@ type PortOpts struct {
 	ValueSpecs []ValueSpec `json:"valueSpecs,omitempty"`
 }
 
+type PortStatus struct {
+	// ID is the unique identifier of the port.
+	// +optional
+	ID string `json:"id,omitempty"`
+}
+
 type BindingProfile struct {
 	// OVSHWOffload enables or disables the OVS hardware offload feature.
 	OVSHWOffload bool `json:"ovsHWOffload,omitempty"`
diff --git a/controllers/openstackcluster_controller.go b/controllers/openstackcluster_controller.go
index 08177fc9ed..b72792fc02 100644
--- a/controllers/openstackcluster_controller.go
+++ b/controllers/openstackcluster_controller.go
@@ -349,7 +349,7 @@ func reconcileBastion(scope scope.Scope, cluster *clusterv1.Cluster, openStackCl
 		}
 	}
 
-	instanceStatus, err = computeService.CreateInstance(openStackCluster, openStackCluster, instanceSpec, cluster.Name)
+	instanceStatus, err = computeService.CreateInstance(openStackCluster, openStackCluster, instanceSpec, cluster.Name, []string{})
 	if err != nil {
 		return fmt.Errorf("failed to reconcile bastion: %w", err)
 	}
diff --git a/controllers/openstackmachine_controller.go b/controllers/openstackmachine_controller.go
index b0345c078c..447fc09780 100644
--- a/controllers/openstackmachine_controller.go
+++ b/controllers/openstackmachine_controller.go
@@ -328,9 +328,24 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope
 		return ctrl.Result{}, err
 	}
 
-	instanceStatus, err := r.getOrCreate(scope.Logger(), cluster, openStackCluster, machine, openStackMachine, computeService, userData)
+	managedSecurityGroups := getManagedSecurityGroups(openStackCluster, machine, openStackMachine)
+	instanceTags := getInstanceTags(openStackMachine, openStackCluster)
+	portsStatus, err := r.getOrCreatePorts(scope.Logger(), clusterName, openStackCluster, openStackMachine.Spec.Ports, openStackMachine.Spec.Trunk, managedSecurityGroups, instanceTags, openStackMachine.Name, openStackMachine, networkingService)
 	if err != nil {
-		// Conditions set in getOrCreate
+		// Conditions set in getOrCreatePorts
+		return ctrl.Result{}, err
+	}
+	// TODO(emilien) do we want to deal with Ports Status like we do for the nova instance? Might be expensive...
+	// In the meantime, we consider that ports are ready if they are created.
+	conditions.MarkTrue(openStackMachine, infrav1.PortsReadyCondition)
+	portIDs := make([]string, len(*portsStatus))
+	for i, portStatus := range *portsStatus {
+		portIDs[i] = portStatus.ID
+	}
+
+	instanceStatus, err := r.getOrCreateInstance(scope.Logger(), cluster, openStackCluster, machine, openStackMachine, computeService, userData, portIDs)
+	if err != nil {
+		// Conditions set in getOrCreateInstance
 		return ctrl.Result{}, err
 	}
 
@@ -430,7 +445,33 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope
 	return ctrl.Result{}, nil
 }
 
-func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service, userData string) (*compute.InstanceStatus, error) {
+func (r *OpenStackMachineReconciler) getOrCreatePorts(logger logr.Logger, clusterName string, openStackCluster *infrav1.OpenStackCluster, machinePorts []infrav1.PortOpts, trunkEnabled bool, securityGroups []infrav1.SecurityGroupFilter, instanceTags []string, instanceName string, openStackMachine *infrav1.OpenStackMachine, networkingService *networking.Service) (*[]infrav1.PortStatus, error) {
+	// TODO(emilien) implement portsStatus where we check whether the machine already has ports created and recorded in OpenStackMachine.Status.
+	// Check `GetInstanceStatusByName` as it's done for instances. For each port found in Status, we might want to get the port name and verify there is no duplicate.
+	// If Status is empty, we create the ports and add them to the Status.
+	// If Status is not empty, we try to adopt the existing ports by checking whether the ports still exist; if not, we create them and add them to the Status.
+
+	// For now, we create the ports.
+	var portsStatus *[]infrav1.PortStatus
+	var err error
+
+	if openStackMachine.Status.PortsStatus == nil {
+		logger.Info("Creating ports for OpenStackMachine", "name", openStackMachine.Name)
+		portsStatus, err = networkingService.CreatePorts(openStackMachine, clusterName, openStackCluster, machinePorts, trunkEnabled, securityGroups, instanceTags, instanceName)
+		if err != nil {
+			// Add Conditions
+			conditions.MarkFalse(openStackMachine, infrav1.PortsReadyCondition, infrav1.PortsCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
+			return nil, fmt.Errorf("create ports: %w", err)
+		}
+	}
+
+	// TODO(emilien) maybe we want to set PortsStatus in OpenStackMachineStatus here instead of in port.go?
+	// The major difference with doing it in port.go is that ports can be added to the status one by one, instead of all at once in the controller.
+
+	return portsStatus, nil
+}
+
+func (r *OpenStackMachineReconciler) getOrCreateInstance(logger logr.Logger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service, userData string, portIDs []string) (*compute.InstanceStatus, error) {
 	instanceStatus, err := computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name)
 	if err != nil {
 		logger.Info("Unable to get OpenStack instance", "name", openStackMachine.Name)
@@ -441,7 +482,7 @@ func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *cl
 	if instanceStatus == nil {
 		instanceSpec := machineToInstanceSpec(openStackCluster, machine, openStackMachine, userData)
 		logger.Info("Machine does not exist, creating Machine", "name", openStackMachine.Name)
-		instanceStatus, err = computeService.CreateInstance(openStackMachine, openStackCluster, instanceSpec, cluster.Name)
+		instanceStatus, err = computeService.CreateInstance(openStackMachine, openStackCluster, instanceSpec, cluster.Name, portIDs)
 		if err != nil {
 			conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
 			return nil, fmt.Errorf("create OpenStack instance: %w", err)
 		}
@@ -472,6 +513,19 @@ func machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine *
 		instanceSpec.FailureDomain = *machine.Spec.FailureDomain
 	}
 
+	instanceSpec.Tags = getInstanceTags(openStackMachine, openStackCluster)
+
+	instanceSpec.SecurityGroups = getManagedSecurityGroups(openStackCluster, machine, openStackMachine)
+
+	instanceSpec.Ports = openStackMachine.Spec.Ports
+
+	return &instanceSpec
+}
+
+// getInstanceTags returns the tags that should be applied to the instance.
+// The tags are a combination of the tags specified on the OpenStackMachine and
+// the ones specified on the OpenStackCluster.
+func getInstanceTags(openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster) []string {
 	machineTags := []string{}
 
 	// Append machine specific tags
@@ -494,31 +548,31 @@ func machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine *
 	}
 
 	machineTags = deduplicate(machineTags)
-	instanceSpec.Tags = machineTags
+	return machineTags
+}
 
-	instanceSpec.SecurityGroups = openStackMachine.Spec.SecurityGroups
-	if openStackCluster.Spec.ManagedSecurityGroups {
-		var managedSecurityGroup string
-		if util.IsControlPlaneMachine(machine) {
-			if openStackCluster.Status.ControlPlaneSecurityGroup != nil {
-				managedSecurityGroup = openStackCluster.Status.ControlPlaneSecurityGroup.ID
-			}
-		} else {
-			if openStackCluster.Status.WorkerSecurityGroup != nil {
-				managedSecurityGroup = openStackCluster.Status.WorkerSecurityGroup.ID
-			}
+// getManagedSecurityGroups returns a combination of OpenStackMachine.Spec.SecurityGroups
+// and the security group managed by the OpenStackCluster, depending on whether the machine is a control plane or a worker machine.
+func getManagedSecurityGroups(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) []infrav1.SecurityGroupFilter {
+	machineSpecSecurityGroups := openStackMachine.Spec.SecurityGroups
+	var managedSecurityGroup string
+	if util.IsControlPlaneMachine(machine) {
+		if openStackCluster.Status.ControlPlaneSecurityGroup != nil {
+			managedSecurityGroup = openStackCluster.Status.ControlPlaneSecurityGroup.ID
 		}
-
-		if managedSecurityGroup != "" {
-			instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, infrav1.SecurityGroupFilter{
-				ID: managedSecurityGroup,
-			})
+	} else {
+		if openStackCluster.Status.WorkerSecurityGroup != nil {
+			managedSecurityGroup = openStackCluster.Status.WorkerSecurityGroup.ID
 		}
 	}
 
-	instanceSpec.Ports = openStackMachine.Spec.Ports
+	if managedSecurityGroup != "" {
+		machineSpecSecurityGroups = append(machineSpecSecurityGroups, infrav1.SecurityGroupFilter{
+			ID: managedSecurityGroup,
+		})
+	}
 
-	return &instanceSpec
+	return machineSpecSecurityGroups
 }
 
 func (r *OpenStackMachineReconciler) reconcileLoadBalancerMember(scope scope.Scope, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, instanceNS *compute.InstanceNetworkStatus, clusterName string) error {
diff --git a/controllers/suite_test.go b/controllers/suite_test.go
index b0edc98d5f..1cb8a8ba32 100644
--- a/controllers/suite_test.go
+++ b/controllers/suite_test.go
@@ -151,7 +151,7 @@ var _ = Describe("When calling getOrCreate", func() {
 		openStackMachine := &infrav1.OpenStackMachine{}
 
 		mockScopeFactory.ComputeClient.EXPECT().ListServers(gomock.Any()).Return(nil, errors.New("Test error when listing servers"))
-		instanceStatus, err := reconsiler.getOrCreate(logger, cluster, openStackCluster, machine, openStackMachine, computeService, "")
+		instanceStatus, err := reconsiler.getOrCreateInstance(logger, cluster, openStackCluster, machine, openStackMachine, computeService, "", []string{})
 		Expect(err).To(HaveOccurred())
 		Expect(instanceStatus).To(BeNil())
 		conditions := openStackMachine.GetConditions()
diff --git a/pkg/clients/networking.go b/pkg/clients/networking.go
index 23ad3c8fdf..e5f609c74d 100644
--- a/pkg/clients/networking.go
+++ b/pkg/clients/networking.go
@@ -36,6 +36,11 @@ import (
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/metrics"
 )
 
+// PortExt is the base gophercloud Port with extensions used by PortStatus.
+type PortExt struct {
+	ports.Port
+}
+
 type NetworkClient interface {
 	ListFloatingIP(opts floatingips.ListOptsBuilder) ([]floatingips.FloatingIP, error)
 	CreateFloatingIP(opts floatingips.CreateOptsBuilder) (*floatingips.FloatingIP, error)
diff --git a/pkg/cloud/services/compute/instance.go b/pkg/cloud/services/compute/instance.go
index c6b70af87e..09a531b2bd 100644
--- a/pkg/cloud/services/compute/instance.go
+++ b/pkg/cloud/services/compute/instance.go
@@ -90,7 +90,6 @@ func (s *Service) normalizePortTarget(port *infrav1.PortOpts, openStackCluster *
 		if fixedIP.Subnet == nil {
 			continue
 		}
-
 		subnet, err := networkingService.GetSubnetByFilter(fixedIP.Subnet)
 		if err != nil {
 			// Multiple matches might be ok later when we restrict matches to a single network
@@ -221,8 +220,8 @@ func (s *Service) constructPorts(openStackCluster *infrav1.OpenStackCluster, ins
 	return ports, nil
 }
 
-func (s *Service) CreateInstance(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string) (*InstanceStatus, error) {
-	return s.createInstanceImpl(eventObject, openStackCluster, instanceSpec, clusterName, retryIntervalInstanceStatus)
+func (s *Service) CreateInstance(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, portIDs []string) (*InstanceStatus, error) {
+	return s.createInstanceImpl(eventObject, openStackCluster, instanceSpec, clusterName, retryIntervalInstanceStatus, portIDs)
 }
 
 func (s *Service) getAndValidateFlavor(flavorName string) (*flavors.Flavor, error) {
@@ -234,7 +233,7 @@ func (s *Service) getAndValidateFlavor(flavorName string) (*flavors.Flavor, erro
 	return f, nil
 }
 
-func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, retryInterval time.Duration) (*InstanceStatus, error) {
+func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, retryInterval time.Duration, portIDs []string) (*InstanceStatus, error) {
 	var server *clients.ServerExt
 	portList := []servers.Network{}
 
@@ -254,41 +253,50 @@ func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluste
 			return
 		}
 
+		// TODO(emilien) we probably want to move this into port management
 		if err := s.deletePorts(eventObject, portList); err != nil {
 			s.scope.Logger().V(4).Error(err, "Failed to clean up ports after failure")
 		}
 	}()
 
-	ports, err := s.constructPorts(openStackCluster, instanceSpec)
-	if err != nil {
-		return nil, err
-	}
-
-	networkingService, err := s.getNetworkingService()
-	if err != nil {
-		return nil, err
-	}
-
-	securityGroups, err := networkingService.GetSecurityGroups(instanceSpec.SecurityGroups)
-	if err != nil {
-		return nil, fmt.Errorf("error getting security groups: %v", err)
-	}
-
-	for i := range ports {
-		portOpts := &ports[i]
-		iTags := []string{}
-		if len(instanceSpec.Tags) > 0 {
-			iTags = instanceSpec.Tags
+	if len(portIDs) == 0 {
+		ports, err := s.constructPorts(openStackCluster, instanceSpec)
+		if err != nil {
+			return nil, err
 		}
-		portName := networking.GetPortName(instanceSpec.Name, portOpts, i)
-		port, err := networkingService.GetOrCreatePort(eventObject, clusterName, portName, portOpts, securityGroups, iTags)
+
+		networkingService, err := s.getNetworkingService()
 		if err != nil {
 			return nil, err
 		}
 
-		portList = append(portList, servers.Network{
-			Port: port.ID,
-		})
+		securityGroups, err := networkingService.GetSecurityGroups(instanceSpec.SecurityGroups)
+		if err != nil {
+			return nil, fmt.Errorf("error getting security groups: %v", err)
+		}
+
+		for i := range ports {
+			portOpts := &ports[i]
+			iTags := []string{}
+			if len(instanceSpec.Tags) > 0 {
+				iTags = instanceSpec.Tags
+			}
+			portName := networking.GetPortName(instanceSpec.Name, portOpts, i)
+			port, err := networkingService.GetOrCreatePort(eventObject, clusterName, portName, portOpts, securityGroups, iTags)
+			if err != nil {
+				return nil, err
+			}
+
+			portList = append(portList, servers.Network{
+				Port: port.ID,
+			})
+		}
+	} else {
+		for _, portID := range portIDs {
+			portList = append(portList, servers.Network{
+				Port: portID,
+			})
+		}
 	}
 
 	instanceCreateTimeout := getTimeout("CLUSTER_API_OPENSTACK_INSTANCE_CREATE_TIMEOUT", timeoutInstanceCreate)
diff --git a/pkg/cloud/services/compute/instance_test.go b/pkg/cloud/services/compute/instance_test.go
index cafc9e3edc..b8e33b8c8d 100644
--- a/pkg/cloud/services/compute/instance_test.go
+++ b/pkg/cloud/services/compute/instance_test.go
@@ -48,7 +48,6 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7"
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/clients"
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/clients/mock"
-	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking"
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/scope"
 )
 
@@ -77,47 +76,6 @@ func (m *gomegaMockMatcher) Matches(x interface{}) bool {
 	return success
 }
 
-func Test_getPortName(t *testing.T) {
-	type args struct {
-		instanceName string
-		opts         *infrav1.PortOpts
-		netIndex     int
-	}
-	tests := []struct {
-		name string
-		args args
-		want string
-	}{
-		{
-			name: "with nil PortOpts",
-			args: args{"test-1-instance", nil, 2},
-			want: "test-1-instance-2",
-		},
-		{
-			name: "with PortOpts name suffix",
-			args: args{"test-1-instance", &infrav1.PortOpts{NameSuffix: "foo"}, 4},
-			want: "test-1-instance-foo",
-		},
-		{
-			name: "without PortOpts name suffix",
-			args: args{"test-1-instance", &infrav1.PortOpts{}, 4},
-			want: "test-1-instance-4",
-		},
-		{
-			name: "with PortOpts name suffix",
-			args: args{"test-1-instance", &infrav1.PortOpts{NameSuffix: "foo2", Network: &infrav1.NetworkFilter{ID: "bar"}, DisablePortSecurity: pointer.Bool(true)}, 4},
-			want: "test-1-instance-foo2",
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			if got := networking.GetPortName(tt.args.instanceName, tt.args.opts, tt.args.netIndex); got != tt.want {
-				t.Errorf("getPortName() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-}
-
 func TestService_getImageID(t *testing.T) {
 	const imageIDA = "ce96e584-7ebc-46d6-9e55-987d72e3806c"
 	const imageIDB = "8f536889-5198-42d7-8314-cb78f4f4755c"
@@ -976,7 +934,7 @@ func TestService_ReconcileInstance(t *testing.T) {
 	}
 
 	// Call CreateInstance with a reduced retry interval to speed up the test
-	_, err = s.createInstanceImpl(&infrav1.OpenStackMachine{}, getDefaultOpenStackCluster(), tt.getInstanceSpec(), "cluster-name", time.Nanosecond)
+	_, err = s.createInstanceImpl(&infrav1.OpenStackMachine{}, getDefaultOpenStackCluster(), tt.getInstanceSpec(), "cluster-name", time.Nanosecond, []string{})
 	if (err != nil) != tt.wantErr {
 		t.Errorf("Service.CreateInstance() error = %v, wantErr %v", err, tt.wantErr)
 		return
diff --git a/pkg/cloud/services/networking/port.go b/pkg/cloud/services/networking/port.go
index e3e9b0d8dd..a3402b11c4 100644
--- a/pkg/cloud/services/networking/port.go
+++ b/pkg/cloud/services/networking/port.go
@@ -18,6 +18,7 @@ package networking
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"strings"
 	"time"
@@ -30,6 +31,7 @@ import (
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7"
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/record"
+	capocontroller "sigs.k8s.io/cluster-api-provider-openstack/pkg/utils/controller"
 	capoerrors "sigs.k8s.io/cluster-api-provider-openstack/pkg/utils/errors"
 	"sigs.k8s.io/cluster-api-provider-openstack/pkg/utils/names"
 )
@@ -330,3 +332,218 @@ func GetPortName(instanceName string, opts *infrav1.PortOpts, netIndex int) stri
 	}
 	return fmt.Sprintf("%s-%d", instanceName, netIndex)
 }
+
+func (s *Service) CreatePorts(eventObject runtime.Object, clusterName string, openStackCluster *infrav1.OpenStackCluster, ports []infrav1.PortOpts, trunkEnabled bool, securityGroups []infrav1.SecurityGroupFilter, instanceTags []string, instanceName string) (*[]infrav1.PortStatus, error) {
+	return s.createPortsImpl(eventObject, clusterName, openStackCluster, ports, trunkEnabled, securityGroups, instanceTags, instanceName)
+}
+
+func (s *Service) createPortsImpl(eventObject runtime.Object, clusterName string, openStackCluster *infrav1.OpenStackCluster, ports []infrav1.PortOpts, trunkEnabled bool, securityGroups []infrav1.SecurityGroupFilter, instanceTags []string, instanceName string) (*[]infrav1.PortStatus, error) {
+	instancePorts, err := s.constructPorts(openStackCluster, ports, trunkEnabled)
+	if err != nil {
+		return nil, err
+	}
+
+	instanceSecurityGroups, err := s.GetSecurityGroups(securityGroups)
+	if err != nil {
+		return nil, fmt.Errorf("error getting security groups: %v", err)
+	}
+
+	portsStatus := make([]infrav1.PortStatus, 0, len(instancePorts))
+
+	for i := range instancePorts {
+		portOpts := &instancePorts[i]
+		iTags := []string{}
+		if len(instanceTags) > 0 {
+			iTags = instanceTags
+		}
+		portName := GetPortName(instanceName, portOpts, i)
+		port, err := s.GetOrCreatePort(eventObject, clusterName, portName, portOpts, instanceSecurityGroups, iTags)
+		if err != nil {
+			record.Warnf(eventObject, "FailedCreatePort", "Failed to create port; name=%s err=%v", portName, err)
+			return nil, err
+		}
+		record.Eventf(eventObject, "SuccessfulCreatePort", "Created port; id=%s", port.ID)
+
+		portsStatus = append(portsStatus, infrav1.PortStatus{
+			ID: port.ID,
+		})
+	}
+
+	// TODO(emilien) do we want to set status here or in the controller?
+	openstackMachineStatus, err := capocontroller.GetOpenStackMachineStatus(eventObject)
+	if err != nil {
+		return nil, err
+	}
+	openstackMachineStatus.PortsStatus = portsStatus
+
+	return &portsStatus, nil
+}
+
+func (s *Service) constructPorts(openStackCluster *infrav1.OpenStackCluster, ports []infrav1.PortOpts, trunkEnabled bool) ([]infrav1.PortOpts, error) {
+	// Ensure user-specified ports have all required fields
+	ports, err := s.normalizePorts(ports, openStackCluster, trunkEnabled)
+	if err != nil {
+		return nil, err
+	}
+
+	// No networks or ports found in the spec, so create a port on the cluster network
+	if len(ports) == 0 {
+		port := infrav1.PortOpts{
+			Network: &infrav1.NetworkFilter{
+				ID: openStackCluster.Status.Network.ID,
+			},
+			Trunk: &trunkEnabled,
+		}
+		for _, subnet := range openStackCluster.Status.Network.Subnets {
+			port.FixedIPs = append(port.FixedIPs, infrav1.FixedIP{
+				Subnet: &infrav1.SubnetFilter{
+					ID: subnet.ID,
+				},
+			})
+		}
+		ports = []infrav1.PortOpts{port}
+	}
+
+	// Trunk support is required if any port has trunk enabled
+	portUsesTrunk := func() bool {
+		for _, port := range ports {
+			if port.Trunk != nil && *port.Trunk {
+				return true
+			}
+		}
+		return false
+	}
+	if portUsesTrunk() {
+		trunkSupported, err := s.isTrunkExtSupported()
+		if err != nil {
+			return nil, err
+		}
+		if !trunkSupported {
+			return nil, fmt.Errorf("there is no trunk support; please ensure that the trunk extension is enabled in your OpenStack deployment")
+		}
+	}
+
+	return ports, nil
+
+}
+
+func (s *Service) normalizePorts(ports []infrav1.PortOpts, openStackCluster *infrav1.OpenStackCluster, trunkEnabled bool) ([]infrav1.PortOpts, error) {
+	normalizedPorts := make([]infrav1.PortOpts, 0, len(ports))
+	for i := range ports {
+		// Deep copy the port to avoid mutating the original
+		port := ports[i].DeepCopy()
+
+		// No Trunk field specified for the port, inherit the machine default
+		if port.Trunk == nil {
+			port.Trunk = &trunkEnabled
+		}
+
+		if err := s.normalizePortTarget(port, openStackCluster, i); err != nil {
+			return nil, err
+		}
+
+		normalizedPorts = append(normalizedPorts, *port)
+	}
+	return normalizedPorts, nil
+}
+
+func (s *Service) normalizePortTarget(port *infrav1.PortOpts, openStackCluster *infrav1.OpenStackCluster, portIdx int) error {
+	// Treat no Network and empty Network the same
+	noNetwork := port.Network == nil || (*port.Network == infrav1.NetworkFilter{})
+
+	// No network or subnets defined: use cluster defaults
+	if noNetwork && len(port.FixedIPs) == 0 {
+		port.Network = &infrav1.NetworkFilter{
+			ID: openStackCluster.Status.Network.ID,
+		}
+		for _, subnet := range openStackCluster.Status.Network.Subnets {
+			port.FixedIPs = append(port.FixedIPs, infrav1.FixedIP{
+				Subnet: &infrav1.SubnetFilter{
+					ID: subnet.ID,
+				},
+			})
+		}
+
+		return nil
+	}
+
+	// No network, but fixed IPs are defined (we handled the no fixed
+	// IPs case above): try to infer network from a subnet
+	if noNetwork {
+		s.scope.Logger().V(4).Info("No network defined for port, attempting to infer from subnet", "port", portIdx)
+
+		// Look for a unique subnet defined in FixedIPs. If we find one
+		// we can use it to infer the network ID. We don't need to worry
+		// here about the case where different FixedIPs have different
+		// networks because that will cause an error later when we try
+		// to create the port.
+		networkID, err := func() (string, error) {
+			for i, fixedIP := range port.FixedIPs {
+				if fixedIP.Subnet == nil {
+					continue
+				}
+
+				subnet, err := s.GetSubnetByFilter(fixedIP.Subnet)
+				if err != nil {
+					// Multiple matches might be ok later when we restrict matches to a single network
+					if errors.Is(err, ErrMultipleMatches) {
+						s.scope.Logger().V(4).Info("Couldn't infer network from subnet", "subnetIndex", i, "err", err)
+						continue
+					}
+
+					return "", err
+				}
+
+				// Cache the subnet ID in the FixedIP
+				fixedIP.Subnet.ID = subnet.ID
+				return subnet.NetworkID, nil
+			}
+
+			// TODO: This is a spec error: it should set the machine to failed
+			return "", fmt.Errorf("port %d has no network and unable to infer from fixed IPs", portIdx)
+		}()
+		if err != nil {
+			return err
+		}
+
+		port.Network = &infrav1.NetworkFilter{
+			ID: networkID,
+		}
+
+		return nil
+	}
+
+	// Nothing to do if network ID is already set
+	if port.Network.ID != "" {
+		return nil
+	}
+
+	// Network is defined by Filter
+	netIDs, err := s.GetNetworkIDsByFilter(port.Network.ToListOpt())
+	if err != nil {
+		return err
+	}
+
+	// TODO: These are spec errors: they should set the machine to failed
+	if len(netIDs) > 1 {
+		return fmt.Errorf("network filter for port %d returns more than one result", portIdx)
+	} else if len(netIDs) == 0 {
+		return fmt.Errorf("network filter for port %d returns no networks", portIdx)
+	}
+
+	port.Network.ID = netIDs[0]
+
+	return nil
+}
+
+// isTrunkExtSupported verifies trunk setup on the OpenStack deployment.
+func (s *Service) isTrunkExtSupported() (trunkSupported bool, err error) {
+	trunkSupport, err := s.GetTrunkSupport()
+	if err != nil {
+		return false, fmt.Errorf("there was an issue verifying whether trunk support is available; please try again later: %v", err)
+	}
+	if !trunkSupport {
+		return false, nil
+	}
+	return true, nil
+}
diff --git a/pkg/cloud/services/networking/port_test.go b/pkg/cloud/services/networking/port_test.go
index d0e2f9ec91..77d8a50c4b 100644
--- a/pkg/cloud/services/networking/port_test.go
+++ b/pkg/cloud/services/networking/port_test.go
@@ -19,17 +19,22 @@ package networking
 import (
 	"testing"
 
+	"github.com/go-logr/logr"
 	"github.com/golang/mock/gomock"
+	"github.com/google/go-cmp/cmp"
 	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags"
 	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding"
 	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity"
 	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
 	"github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
 	"github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
 	. "github.com/onsi/gomega"
"github.com/onsi/gomega" + "k8s.io/utils/pointer" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7" "sigs.k8s.io/cluster-api-provider-openstack/pkg/clients/mock" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" ) func Test_GetOrCreatePort(t *testing.T) { @@ -618,3 +623,403 @@ func Test_GarbageCollectErrorInstancesPort(t *testing.T) { func pointerTo(b bool) *bool { return &b } + +func TestService_normalizePorts(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + const ( + defaultNetworkID = "3c66f3ca-2d26-4d9d-ae3b-568f54129773" + defaultSubnetID = "d8dbba89-8c39-4192-a571-e702fca35bac" + + networkID = "afa54944-1443-4132-9ef5-ce37eb4d6ab6" + subnetID = "d786e715-c299-4a97-911d-640c10fc0392" + ) + + openStackCluster := &infrav1.OpenStackCluster{ + Status: infrav1.OpenStackClusterStatus{ + Network: &infrav1.NetworkStatusWithSubnets{ + NetworkStatus: infrav1.NetworkStatus{ + ID: defaultNetworkID, + }, + Subnets: []infrav1.Subnet{ + {ID: defaultSubnetID}, + }, + }, + }, + } + + tests := []struct { + name string + ports []infrav1.PortOpts + instanceTrunk bool + expectNetwork func(m *mock.MockNetworkClientMockRecorder) + want []infrav1.PortOpts + wantErr bool + }{ + { + name: "No ports: no ports", + ports: []infrav1.PortOpts{}, + want: []infrav1.PortOpts{}, + }, + { + name: "Nil network, no fixed IPs: cluster defaults", + ports: []infrav1.PortOpts{ + { + Network: nil, + FixedIPs: nil, + }, + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: defaultNetworkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: defaultSubnetID, + }, + }, + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "Empty network, no fixed IPs: cluster defaults", + ports: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{}, + FixedIPs: nil, + }, + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: defaultNetworkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: defaultSubnetID, + }, + }, + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "Port inherits trunk from instance", + ports: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{}, + FixedIPs: nil, + }, + }, + instanceTrunk: true, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: defaultNetworkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: defaultSubnetID, + }, + }, + }, + Trunk: pointer.Bool(true), + }, + }, + }, + { + name: "Port overrides trunk from instance", + ports: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{}, + FixedIPs: nil, + Trunk: pointer.Bool(true), + }, + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: defaultNetworkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: defaultSubnetID, + }, + }, + }, + Trunk: pointer.Bool(true), + }, + }, + }, + { + name: "Network defined by ID: unchanged", + ports: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + }, + }, + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "Network defined by filter: add ID from network lookup", + ports: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + Name: "test-network", + }, + }, + }, + expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.ListNetwork(networks.ListOpts{Name: 
"test-network"}).Return([]networks.Network{ + {ID: networkID}, + }, nil) + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + Name: "test-network", + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "No network, fixed IP has subnet by ID: add ID from subnet", + ports: []infrav1.PortOpts{ + { + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: subnetID, + }, + }, + }, + }, + }, + expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.GetSubnet(subnetID).Return(&subnets.Subnet{ID: subnetID, NetworkID: networkID}, nil) + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: subnetID, + }, + }, + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "No network, fixed IP has subnet by filter: add ID from subnet", + ports: []infrav1.PortOpts{ + { + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet", + }, + }, + }, + }, + }, + expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.ListSubnet(subnets.ListOpts{Name: "test-subnet"}).Return([]subnets.Subnet{ + {ID: subnetID, NetworkID: networkID}, + }, nil) + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: subnetID, + Name: "test-subnet", + }, + }, + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "No network, fixed IP subnet returns no matches: error", + ports: []infrav1.PortOpts{ + { + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet", + }, + }, + }, + }, + }, + expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.ListSubnet(subnets.ListOpts{Name: "test-subnet"}).Return([]subnets.Subnet{}, nil) + }, + wantErr: true, + }, + { + name: "No network, only fixed IP subnet returns multiple matches: error", + ports: []infrav1.PortOpts{ + { + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet", + }, + }, + }, + }, + }, + expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.ListSubnet(subnets.ListOpts{Name: "test-subnet"}).Return([]subnets.Subnet{ + {ID: subnetID, NetworkID: networkID}, + {ID: "8008494c-301e-4e5c-951b-a8ab568447fd", NetworkID: "5d48bfda-db28-42ee-8374-50e13d1fe5ea"}, + }, nil) + }, + wantErr: true, + }, + { + name: "No network, first fixed IP subnet returns multiple matches: used ID from second fixed IP", + ports: []infrav1.PortOpts{ + { + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet1", + }, + }, + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet2", + }, + }, + }, + }, + }, + expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.ListSubnet(subnets.ListOpts{Name: "test-subnet1"}).Return([]subnets.Subnet{ + {ID: subnetID, NetworkID: networkID}, + {ID: "8008494c-301e-4e5c-951b-a8ab568447fd", NetworkID: "5d48bfda-db28-42ee-8374-50e13d1fe5ea"}, + }, nil) + m.ListSubnet(subnets.ListOpts{Name: "test-subnet2"}).Return([]subnets.Subnet{ + {ID: subnetID, NetworkID: networkID}, + }, nil) + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet1", + }, + }, + { + Subnet: &infrav1.SubnetFilter{ + ID: subnetID, + Name: "test-subnet2", + }, + }, + }, + Trunk: pointer.Bool(false), + }, + }, + }, + } + for _, 
+		t.Run(tt.name, func(t *testing.T) {
+			g := NewWithT(t)
+
+			mockClient := mock.NewMockNetworkClient(mockCtrl)
+			if tt.expectNetwork != nil {
+				tt.expectNetwork(mockClient.EXPECT())
+			}
+			s := Service{
+				client: mockClient,
+				scope:  scope.NewMockScopeFactory(mockCtrl, "", logr.Discard()),
+			}
+
+			got, err := s.normalizePorts(tt.ports, openStackCluster, tt.instanceTrunk)
+			if tt.wantErr {
+				g.Expect(err).To(HaveOccurred())
+				return
+			}
+
+			g.Expect(err).NotTo(HaveOccurred())
+			g.Expect(got).To(Equal(tt.want), cmp.Diff(got, tt.want))
+		})
+	}
+}
+
+func Test_getPortName(t *testing.T) {
+	type args struct {
+		instanceName string
+		opts         *infrav1.PortOpts
+		netIndex     int
+	}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "with nil PortOpts",
+			args: args{"test-1-instance", nil, 2},
+			want: "test-1-instance-2",
+		},
+		{
+			name: "with PortOpts name suffix",
+			args: args{"test-1-instance", &infrav1.PortOpts{NameSuffix: "foo"}, 4},
+			want: "test-1-instance-foo",
+		},
+		{
+			name: "without PortOpts name suffix",
+			args: args{"test-1-instance", &infrav1.PortOpts{}, 4},
+			want: "test-1-instance-4",
+		},
+		{
+			name: "with PortOpts name suffix",
+			args: args{"test-1-instance", &infrav1.PortOpts{NameSuffix: "foo2", Network: &infrav1.NetworkFilter{ID: "bar"}, DisablePortSecurity: pointer.Bool(true)}, 4},
+			want: "test-1-instance-foo2",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := GetPortName(tt.args.instanceName, tt.args.opts, tt.args.netIndex); got != tt.want {
+				t.Errorf("getPortName() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
diff --git a/pkg/utils/controller/controller.go b/pkg/utils/controller/controller.go
new file mode 100644
index 0000000000..1b2c844d06
--- /dev/null
+++ b/pkg/utils/controller/controller.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+	http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7"
+)
+
+func GetOpenStackMachineStatus(eventObject runtime.Object) (*infrav1.OpenStackMachineStatus, error) {
+	if eventObject == nil {
+		return nil, fmt.Errorf("event object is nil")
+	}
+	if machine, ok := eventObject.(*infrav1.OpenStackMachine); ok {
+		return &machine.Status, nil
+	}
+	return nil, fmt.Errorf("unable to get OpenStackMachine status from event object")
+}
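
Reviewer note (not part of the diff): the shape of the new flow is "create the ports first, then boot the server on them". Below is a minimal illustrative Go sketch of the call sequence inside reconcileNormal, with error handling abbreviated. The names match the patch, but the fragment assumes the surrounding reconciler setup (scope, clusterName, services) and is not meant to compile on its own:

	// 1. Create the Neutron ports up front; getOrCreatePorts marks the
	//    PortsReady condition false and returns early on failure.
	portsStatus, err := r.getOrCreatePorts(scope.Logger(), clusterName,
		openStackCluster, openStackMachine.Spec.Ports, openStackMachine.Spec.Trunk,
		managedSecurityGroups, instanceTags, openStackMachine.Name,
		openStackMachine, networkingService)
	if err != nil {
		return ctrl.Result{}, err
	}

	// 2. Flatten the returned status into plain port IDs.
	portIDs := make([]string, len(*portsStatus))
	for i, ps := range *portsStatus {
		portIDs[i] = ps.ID
	}

	// 3. Boot the server on the pre-created ports. When portIDs is non-empty,
	//    createInstanceImpl skips its own port construction; callers that keep
	//    the old behaviour (e.g. the bastion reconcile) pass []string{}.
	instanceStatus, err := computeService.CreateInstance(openStackMachine,
		openStackCluster, instanceSpec, clusterName, portIDs)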