
Commit

WIP - move ports out of instance
EmilienM committed Dec 13, 2023
1 parent a873934 commit d9d0d6f
Showing 12 changed files with 791 additions and 97 deletions.
6 changes: 6 additions & 0 deletions api/v1alpha7/conditions_consts.go
@@ -22,6 +22,10 @@ const (
// InstanceReadyCondition reports on current status of the OpenStack instance. Ready indicates the instance is in a Running state.
InstanceReadyCondition clusterv1.ConditionType = "InstanceReady"

// PortsReadyCondition reports on the current status of the ports. Ready indicates that all ports have been created
// successfully and are ready to be attached to the instance.
PortsReadyCondition clusterv1.ConditionType = "PortsReady"

// WaitingForClusterInfrastructureReason used when machine is waiting for cluster infrastructure to be ready before proceeding.
WaitingForClusterInfrastructureReason = "WaitingForClusterInfrastructure"
// WaitingForBootstrapDataReason used when machine is waiting for bootstrap data to be ready before proceeding.
@@ -42,6 +46,8 @@ const (
InstanceDeleteFailedReason = "InstanceDeleteFailed"
// OpenStackErrorReason used when there is an error communicating with OpenStack.
OpenStackErrorReason = "OpenStackError"
// PortsCreateFailedReason used when creating the ports failed.
PortsCreateFailedReason = "PortsCreateFailed"
)

const (
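
These constants feed the standard Cluster API condition helpers. As a minimal sketch of the intended usage, mirroring the controller changes later in this commit (`markPortsCondition` itself is a hypothetical helper, not part of the change):

```go
import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"

	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7"
)

// markPortsCondition is a hypothetical helper: it records PortsReady=False
// with the failure reason when port creation errors, and PortsReady=True
// otherwise.
func markPortsCondition(openStackMachine *infrav1.OpenStackMachine, err error) {
	if err != nil {
		conditions.MarkFalse(openStackMachine, infrav1.PortsReadyCondition,
			infrav1.PortsCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
		return
	}
	conditions.MarkTrue(openStackMachine, infrav1.PortsReadyCondition)
}
```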
4 changes: 4 additions & 0 deletions api/v1alpha7/openstackmachine_types.go
@@ -113,6 +113,10 @@ type OpenStackMachineStatus struct {

FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"`

// PortsStatus is the status of the ports associated with the machine.
// +optional
PortsStatus []PortStatus `json:"portsStatus,omitempty"`

// FailureMessage will be set in the event that there is a terminal problem
// reconciling the Machine and will contain a more verbose string suitable
// for logging and human consumption.
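
For illustration, after a successful reconcile the machine status would carry the created port IDs roughly as follows (a sketch only; the ID is made up):

```go
import infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7"

// A populated OpenStackMachineStatus as this commit intends it to look.
var exampleStatus = infrav1.OpenStackMachineStatus{
	Ready: true,
	PortsStatus: []infrav1.PortStatus{
		{ID: "0497e425-5b8b-4a8a-9e01-1a2b3c4d5e6f"}, // hypothetical Neutron port ID
	},
}
```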
6 changes: 6 additions & 0 deletions api/v1alpha7/types.go
@@ -128,6 +128,12 @@ type PortOpts struct {
ValueSpecs []ValueSpec `json:"valueSpecs,omitempty"`
}

type PortStatus struct {
// ID is the unique identifier of the port.
// +optional
ID string `json:"id,omitempty"`
}

type BindingProfile struct {
// OVSHWOffload enables or disables the OVS hardware offload feature.
OVSHWOffload bool `json:"ovsHWOffload,omitempty"`
2 changes: 1 addition & 1 deletion controllers/openstackcluster_controller.go
@@ -349,7 +349,7 @@ func reconcileBastion(scope scope.Scope, cluster *clusterv1.Cluster, openStackCl
}
}

instanceStatus, err = computeService.CreateInstance(openStackCluster, openStackCluster, instanceSpec, cluster.Name)
instanceStatus, err = computeService.CreateInstance(openStackCluster, openStackCluster, instanceSpec, cluster.Name, []string{})
if err != nil {
return fmt.Errorf("failed to reconcile bastion: %w", err)
}
100 changes: 77 additions & 23 deletions controllers/openstackmachine_controller.go
@@ -328,9 +328,24 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope
return ctrl.Result{}, err
}

instanceStatus, err := r.getOrCreate(scope.Logger(), cluster, openStackCluster, machine, openStackMachine, computeService, userData)
managedSecurityGroups := getManagedSecurityGroups(openStackCluster, machine, openStackMachine)
instanceTags := getInstanceTags(openStackMachine, openStackCluster)
portsStatus, err := r.getOrCreatePorts(scope.Logger(), clusterName, openStackCluster, openStackMachine.Spec.Ports, openStackMachine.Spec.Trunk, managedSecurityGroups, instanceTags, openStackMachine.Name, openStackMachine, networkingService)
if err != nil {
// Conditions set in getOrCreate
// Conditions set in getOrCreatePorts
return ctrl.Result{}, err
}
// TODO(emilien) do we want to deal with Ports Status like we do for the nova instance? Might be expensive...
// In the meantime, we consider that ports are ready if they are created.
conditions.MarkTrue(openStackMachine, infrav1.PortsReadyCondition)
portIDs := make([]string, len(*portsStatus))
for i, portStatus := range *portsStatus {
portIDs[i] = portStatus.ID
}

instanceStatus, err := r.getOrCreateInstance(scope.Logger(), cluster, openStackCluster, machine, openStackMachine, computeService, userData, portIDs)
if err != nil {
// Conditions set in getOrCreateInstance
return ctrl.Result{}, err
}

@@ -430,7 +445,33 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope
return ctrl.Result{}, nil
}

func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service, userData string) (*compute.InstanceStatus, error) {
func (r *OpenStackMachineReconciler) getOrCreatePorts(logger logr.Logger, clusterName string, openStackCluster *infrav1.OpenStackCluster, machinePorts []infrav1.PortOpts, trunkEnabled bool, securityGroups []infrav1.SecurityGroupFilter, instanceTags []string, instanceName string, openStackMachine *infrav1.OpenStackMachine, networkingService *networking.Service) (*[]infrav1.PortStatus, error) {
// TODO(emilien) implement portsStatus where we check if the machine has ports created and part of the OpenStackMachine.Status
// Check `GetInstanceStatusByName` as it's done for instances. For each port found in Status, we might want to get the port name and verify there are no duplicates.
// If Status is empty, we create the ports and add them to the Status.
// If Status is not empty, we try to adopt the existing ports by checking if the ports are still there and if not, we create them and add them to the Status.

// For now, we create the ports.
var portsStatus *[]infrav1.PortStatus
var err error

if openStackMachine.Status.PortsStatus == nil {
logger.Info("Creating ports for OpenStackMachine", "name", openStackMachine.Name)
portsStatus, err = networkingService.CreatePorts(openStackMachine, clusterName, openStackCluster, machinePorts, trunkEnabled, securityGroups, instanceTags, instanceName)
if err != nil {
// Add Conditions
conditions.MarkFalse(openStackMachine, infrav1.PortsReadyCondition, infrav1.PortsCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
return nil, fmt.Errorf("create ports: %w", err)
}
} else {
// Ports were already created on a previous reconcile: reuse the IDs recorded in the status
// so the dereference below cannot hit a nil pointer.
portsStatus = &openStackMachine.Status.PortsStatus
}

// TODO(emilien) maybe we want to set PortsStatus in OpenStackMachineStatus here instead of in port.go?
// The major difference with doing it in port.go is that we can add ports to the status one by one instead of all at once in the controller.

return portsStatus, nil
}

func (r *OpenStackMachineReconciler) getOrCreateInstance(logger logr.Logger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service, userData string, portIDs []string) (*compute.InstanceStatus, error) {
instanceStatus, err := computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name)
if err != nil {
logger.Info("Unable to get OpenStack instance", "name", openStackMachine.Name)
@@ -441,7 +482,7 @@ func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *cl
if instanceStatus == nil {
instanceSpec := machineToInstanceSpec(openStackCluster, machine, openStackMachine, userData)
logger.Info("Machine does not exist, creating Machine", "name", openStackMachine.Name)
instanceStatus, err = computeService.CreateInstance(openStackMachine, openStackCluster, instanceSpec, cluster.Name)
instanceStatus, err = computeService.CreateInstance(openStackMachine, openStackCluster, instanceSpec, cluster.Name, portIDs)
if err != nil {
conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
return nil, fmt.Errorf("create OpenStack instance: %w", err)
@@ -472,6 +513,19 @@ func machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine *
instanceSpec.FailureDomain = *machine.Spec.FailureDomain
}

instanceSpec.Tags = getInstanceTags(openStackMachine, openStackCluster)

instanceSpec.SecurityGroups = getManagedSecurityGroups(openStackCluster, machine, openStackMachine)

instanceSpec.Ports = openStackMachine.Spec.Ports

return &instanceSpec
}

// getInstanceTags returns the tags that should be applied to the instance.
// The tags are a combination of the tags specified on the OpenStackMachine and
// the ones specified on the OpenStackCluster.
func getInstanceTags(openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster) []string {
machineTags := []string{}

// Append machine specific tags
@@ -494,31 +548,31 @@ func machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine *
}
machineTags = deduplicate(machineTags)

instanceSpec.Tags = machineTags
return machineTags
}

instanceSpec.SecurityGroups = openStackMachine.Spec.SecurityGroups
if openStackCluster.Spec.ManagedSecurityGroups {
var managedSecurityGroup string
if util.IsControlPlaneMachine(machine) {
if openStackCluster.Status.ControlPlaneSecurityGroup != nil {
managedSecurityGroup = openStackCluster.Status.ControlPlaneSecurityGroup.ID
}
} else {
if openStackCluster.Status.WorkerSecurityGroup != nil {
managedSecurityGroup = openStackCluster.Status.WorkerSecurityGroup.ID
}
// getManagedSecurityGroups returns a combination of OpenStackMachine.Spec.SecurityGroups
// and the security group managed by the OpenStackCluster, depending on whether the machine is a control plane or a worker.
func getManagedSecurityGroups(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) []infrav1.SecurityGroupFilter {
machineSpecSecurityGroups := openStackMachine.Spec.SecurityGroups
var managedSecurityGroup string
if util.IsControlPlaneMachine(machine) {
if openStackCluster.Status.ControlPlaneSecurityGroup != nil {
managedSecurityGroup = openStackCluster.Status.ControlPlaneSecurityGroup.ID
}

if managedSecurityGroup != "" {
instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, infrav1.SecurityGroupFilter{
ID: managedSecurityGroup,
})
} else {
if openStackCluster.Status.WorkerSecurityGroup != nil {
managedSecurityGroup = openStackCluster.Status.WorkerSecurityGroup.ID
}
}

instanceSpec.Ports = openStackMachine.Spec.Ports
if managedSecurityGroup != "" {
machineSpecSecurityGroups = append(machineSpecSecurityGroups, infrav1.SecurityGroupFilter{
ID: managedSecurityGroup,
})
}

return &instanceSpec
return machineSpecSecurityGroups
}

func (r *OpenStackMachineReconciler) reconcileLoadBalancerMember(scope scope.Scope, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, instanceNS *compute.InstanceNetworkStatus, clusterName string) error {
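
The adoption flow sketched in the getOrCreatePorts TODOs above could take roughly this shape. This is only a sketch: `getPortByName` stands in for an assumed Neutron lookup (e.g. a list filtered by port name) that the networking service does not expose today, while `networking.GetPortName` is the existing helper this commit already uses:

```go
import (
	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7"
	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking"
)

// adoptPorts returns the ports that already exist for this machine and the
// indexes of the ports that still need to be created. getPortByName is an
// assumed lookup returning nil when no port matches.
func adoptPorts(instanceName string, machinePorts []infrav1.PortOpts,
	getPortByName func(name string) (*infrav1.PortStatus, error),
) (adopted []infrav1.PortStatus, missing []int, err error) {
	for i := range machinePorts {
		name := networking.GetPortName(instanceName, &machinePorts[i], i)
		port, err := getPortByName(name)
		if err != nil {
			return nil, nil, err
		}
		if port == nil {
			missing = append(missing, i) // port lost or never created: recreate it
			continue
		}
		adopted = append(adopted, *port)
	}
	return adopted, missing, nil
}
```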
2 changes: 1 addition & 1 deletion controllers/suite_test.go
@@ -151,7 +151,7 @@ var _ = Describe("When calling getOrCreate", func() {
openStackMachine := &infrav1.OpenStackMachine{}

mockScopeFactory.ComputeClient.EXPECT().ListServers(gomock.Any()).Return(nil, errors.New("Test error when listing servers"))
instanceStatus, err := reconsiler.getOrCreate(logger, cluster, openStackCluster, machine, openStackMachine, computeService, "")
instanceStatus, err := reconsiler.getOrCreateInstance(logger, cluster, openStackCluster, machine, openStackMachine, computeService, "", []string{})
Expect(err).To(HaveOccurred())
Expect(instanceStatus).To(BeNil())
conditions := openStackMachine.GetConditions()
5 changes: 5 additions & 0 deletions pkg/clients/networking.go
@@ -36,6 +36,11 @@ import (
"sigs.k8s.io/cluster-api-provider-openstack/pkg/metrics"
)

// PortExt is the base gophercloud Port with extensions used by PortStatus.
type PortExt struct {
ports.Port
}

type NetworkClient interface {
ListFloatingIP(opts floatingips.ListOptsBuilder) ([]floatingips.FloatingIP, error)
CreateFloatingIP(opts floatingips.CreateOptsBuilder) (*floatingips.FloatingIP, error)
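
PortExt follows the usual gophercloud pattern of embedding the base resource so that extension fields can later be decoded alongside it. A minimal sketch of listing ports into the extended type, assuming an already-authenticated Neutron *gophercloud.ServiceClient (ExtractPortsInto is gophercloud's helper for decoding into custom types):

```go
import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
)

// listPortExts lists Neutron ports and decodes them into PortExt so any
// embedded extension fields are populated in one pass.
func listPortExts(client *gophercloud.ServiceClient, opts ports.ListOpts) ([]PortExt, error) {
	allPages, err := ports.List(client, opts).AllPages()
	if err != nil {
		return nil, err
	}
	var portList []PortExt
	if err := ports.ExtractPortsInto(allPages, &portList); err != nil {
		return nil, err
	}
	return portList, nil
}
```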
66 changes: 37 additions & 29 deletions pkg/cloud/services/compute/instance.go
@@ -90,7 +90,6 @@ func (s *Service) normalizePortTarget(port *infrav1.PortOpts, openStackCluster *
if fixedIP.Subnet == nil {
continue
}

subnet, err := networkingService.GetSubnetByFilter(fixedIP.Subnet)
if err != nil {
// Multiple matches might be ok later when we restrict matches to a single network
Expand Down Expand Up @@ -221,8 +220,8 @@ func (s *Service) constructPorts(openStackCluster *infrav1.OpenStackCluster, ins
return ports, nil
}

func (s *Service) CreateInstance(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string) (*InstanceStatus, error) {
return s.createInstanceImpl(eventObject, openStackCluster, instanceSpec, clusterName, retryIntervalInstanceStatus)
func (s *Service) CreateInstance(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, portIDs []string) (*InstanceStatus, error) {
return s.createInstanceImpl(eventObject, openStackCluster, instanceSpec, clusterName, retryIntervalInstanceStatus, portIDs)
}

func (s *Service) getAndValidateFlavor(flavorName string) (*flavors.Flavor, error) {
@@ -234,7 +233,7 @@ func (s *Service) getAndValidateFlavor(flavorName string) (*flavors.Flavor, erro
return f, nil
}

func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, retryInterval time.Duration) (*InstanceStatus, error) {
func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, retryInterval time.Duration, portIDs []string) (*InstanceStatus, error) {
var server *clients.ServerExt
portList := []servers.Network{}

@@ -254,41 +253,50 @@ func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluste
return
}

// TODO(emilien) we probably want to move this into port management
if err := s.deletePorts(eventObject, portList); err != nil {
s.scope.Logger().V(4).Error(err, "Failed to clean up ports after failure")
}
}()

ports, err := s.constructPorts(openStackCluster, instanceSpec)
if err != nil {
return nil, err
}

networkingService, err := s.getNetworkingService()
if err != nil {
return nil, err
}

securityGroups, err := networkingService.GetSecurityGroups(instanceSpec.SecurityGroups)
if err != nil {
return nil, fmt.Errorf("error getting security groups: %v", err)
}

for i := range ports {
portOpts := &ports[i]
iTags := []string{}
if len(instanceSpec.Tags) > 0 {
iTags = instanceSpec.Tags
if len(portIDs) == 0 {
ports, err := s.constructPorts(openStackCluster, instanceSpec)
if err != nil {
return nil, err
}
portName := networking.GetPortName(instanceSpec.Name, portOpts, i)
port, err := networkingService.GetOrCreatePort(eventObject, clusterName, portName, portOpts, securityGroups, iTags)

networkingService, err := s.getNetworkingService()
if err != nil {
return nil, err
}

portList = append(portList, servers.Network{
Port: port.ID,
})
securityGroups, err := networkingService.GetSecurityGroups(instanceSpec.SecurityGroups)
if err != nil {
return nil, fmt.Errorf("error getting security groups: %v", err)
}

for i := range ports {
portOpts := &ports[i]
iTags := []string{}
if len(instanceSpec.Tags) > 0 {
iTags = instanceSpec.Tags
}
portName := networking.GetPortName(instanceSpec.Name, portOpts, i)
port, err := networkingService.GetOrCreatePort(eventObject, clusterName, portName, portOpts, securityGroups, iTags)
if err != nil {
return nil, err
}

portList = append(portList, servers.Network{
Port: port.ID,
})
}
} else {
for _, portID := range portIDs {
portList = append(portList, servers.Network{
Port: portID,
})
}
}

instanceCreateTimeout := getTimeout("CLUSTER_API_OPENSTACK_INSTANCE_CREATE_TIMEOUT", timeoutInstanceCreate)
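
For reference, pre-created port IDs ultimately reach Nova as servers.Network entries with only Port set, which is what the new branch above builds. A minimal gophercloud sketch, assuming an authenticated compute *gophercloud.ServiceClient and hypothetical image/flavor references:

```go
import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
)

// bootWithPorts creates a server attached to pre-created Neutron ports,
// mirroring what createInstanceImpl now does when portIDs is non-empty.
func bootWithPorts(client *gophercloud.ServiceClient, name, imageRef, flavorRef string, portIDs []string) (*servers.Server, error) {
	networks := make([]servers.Network, 0, len(portIDs))
	for _, id := range portIDs {
		networks = append(networks, servers.Network{Port: id})
	}
	return servers.Create(client, servers.CreateOpts{
		Name:      name,
		ImageRef:  imageRef,
		FlavorRef: flavorRef,
		Networks:  networks,
	}).Extract()
}
```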
44 changes: 1 addition & 43 deletions pkg/cloud/services/compute/instance_test.go
@@ -48,7 +48,6 @@ import (
infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7"
"sigs.k8s.io/cluster-api-provider-openstack/pkg/clients"
"sigs.k8s.io/cluster-api-provider-openstack/pkg/clients/mock"
"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking"
"sigs.k8s.io/cluster-api-provider-openstack/pkg/scope"
)

@@ -77,47 +76,6 @@ func (m *gomegaMockMatcher) Matches(x interface{}) bool {
return success
}

func Test_getPortName(t *testing.T) {
type args struct {
instanceName string
opts *infrav1.PortOpts
netIndex int
}
tests := []struct {
name string
args args
want string
}{
{
name: "with nil PortOpts",
args: args{"test-1-instance", nil, 2},
want: "test-1-instance-2",
},
{
name: "with PortOpts name suffix",
args: args{"test-1-instance", &infrav1.PortOpts{NameSuffix: "foo"}, 4},
want: "test-1-instance-foo",
},
{
name: "without PortOpts name suffix",
args: args{"test-1-instance", &infrav1.PortOpts{}, 4},
want: "test-1-instance-4",
},
{
name: "with PortOpts name suffix",
args: args{"test-1-instance", &infrav1.PortOpts{NameSuffix: "foo2", Network: &infrav1.NetworkFilter{ID: "bar"}, DisablePortSecurity: pointer.Bool(true)}, 4},
want: "test-1-instance-foo2",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := networking.GetPortName(tt.args.instanceName, tt.args.opts, tt.args.netIndex); got != tt.want {
t.Errorf("getPortName() = %v, want %v", got, tt.want)
}
})
}
}

func TestService_getImageID(t *testing.T) {
const imageIDA = "ce96e584-7ebc-46d6-9e55-987d72e3806c"
const imageIDB = "8f536889-5198-42d7-8314-cb78f4f4755c"
@@ -976,7 +934,7 @@ func TestService_ReconcileInstance(t *testing.T) {
}

// Call CreateInstance with a reduced retry interval to speed up the test
_, err = s.createInstanceImpl(&infrav1.OpenStackMachine{}, getDefaultOpenStackCluster(), tt.getInstanceSpec(), "cluster-name", time.Nanosecond)
_, err = s.createInstanceImpl(&infrav1.OpenStackMachine{}, getDefaultOpenStackCluster(), tt.getInstanceSpec(), "cluster-name", time.Nanosecond, []string{})
if (err != nil) != tt.wantErr {
t.Errorf("Service.CreateInstance() error = %v, wantErr %v", err, tt.wantErr)
return