diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index ae4115d..834beda 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -33,6 +33,7 @@ jobs: VELERO_BACKUP_NAME: my-test-backup-01 # From tag 1.25.0 CINDER_CSI_CHART_VERSION: 2.2.0 + MANILA_CSI_CHART_VERSION: 2.2.0 DOCKER_IMAGE_NAME: velero-plugin-for-openstack SWIFT_CONTAINER_NAME: my-swift-container TESTS_DIRECTORY: tests/actions/integration-tests @@ -76,7 +77,7 @@ jobs: sudo mv "velero-v${VELERO_CLI_VERSION}-linux-amd64/velero" /usr/local/bin/velero chmod 750 /usr/local/bin/velero - name: Deploy DevStack - uses: EmilienM/devstack-action@v0.8 + uses: EmilienM/devstack-action@v0.11 with: branch: 'stable/yoga' enable_workaround_docker_io: 'false' @@ -86,8 +87,25 @@ jobs: SWIFT_HASH=12394u39845623984j28hf93d9173 SWIFT_DEFAULT_BIND_PORT=15492 SERVICE_TIMEOUT=200 - disable_all_services enable_service key rabbit mysql s-proxy s-object s-container s-account c-bak c-api c-vol c-sch n-api n-crt n-cpu n-cond n-sch n-api-meta n-sproxy placement-api placement-client + + CINDER_ISCSI_HELPER=tgtadm + + enable_plugin manila https://github.com/openstack/manila stable/yoga + # LVM Backend config options + MANILA_SERVICE_IMAGE_ENABLED=False + SHARE_DRIVER=manila.share.drivers.lvm.LVMShareDriver + MANILA_ENABLED_BACKENDS=chicago,denver + MANILA_BACKEND1_CONFIG_GROUP_NAME=chicago + MANILA_BACKEND2_CONFIG_GROUP_NAME=denver + MANILA_SHARE_BACKEND1_NAME=CHICAGO + MANILA_SHARE_BACKEND2_NAME=DENVER + MANILA_OPTGROUP_chicago_driver_handles_share_servers=False + MANILA_OPTGROUP_denver_driver_handles_share_servers=False + SHARE_BACKING_FILE_SIZE=32000M + MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='snapshot_support=True create_share_from_snapshot_support=True revert_to_snapshot_support=True mount_snapshot_support=True' + MANILA_CONFIGURE_DEFAULT_TYPES=True + MANILA_INSTALL_TEMPEST_PLUGIN_SYSTEMWIDE=false - name: Prepare Swift container for velero backups run: | source "${{ github.workspace }}/devstack/openrc" @@ -113,7 +131,7 @@ jobs: --wait-for-jobs - name: Create test backup and validate it run: | - # Create Cinder PVC + # Create Cinder/Manila PVC # Checks velero backup-location get velero snapshot-location get diff --git a/.gitignore b/.gitignore index 66fd13c..33ce4c4 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ # Output of the go coverage tool, specifically when used with LiteIDE *.out +.idea # Dependency directories (remove the comment below to include it) # vendor/ diff --git a/README.md b/README.md index 5867a4b..f3fa989 100644 --- a/README.md +++ b/README.md @@ -276,8 +276,15 @@ configuration: # # resticRepoPrefix: swift::/ # resticRepoPrefix: swift:my-awesome-container:/restic # Example volumeSnapshotLocation: + # for Cinder block storage - name: cinder provider: community.openstack.org/openstack + # for Manila shared filesystem storage + - name: manila + provider: community.openstack.org/openstack-manila + config: + # optional Manila CSI driver name (default: nfs.manila.csi.openstack.org) + driver: ceph.manila.csi.openstack.org initContainers: - name: velero-plugin-openstack image: lirt/velero-plugin-for-openstack:v0.5.2 diff --git a/main.go b/main.go index 574892e..cd68b30 100644 --- a/main.go +++ b/main.go @@ -2,6 +2,7 @@ package main import ( "github.com/Lirt/velero-plugin-for-openstack/src/cinder" + "github.com/Lirt/velero-plugin-for-openstack/src/manila" "github.com/Lirt/velero-plugin-for-openstack/src/swift" "github.com/sirupsen/logrus" "github.com/spf13/pflag" @@ -13,6 +14,7 @@ func 
main() { BindFlags(pflag.CommandLine). RegisterObjectStore("community.openstack.org/openstack", newSwiftObjectStore). RegisterVolumeSnapshotter("community.openstack.org/openstack", newCinderBlockStore). + RegisterVolumeSnapshotter("community.openstack.org/openstack-manila", newManilaFSStore). Serve() } @@ -23,3 +25,7 @@ func newSwiftObjectStore(logger logrus.FieldLogger) (interface{}, error) { func newCinderBlockStore(logger logrus.FieldLogger) (interface{}, error) { return cinder.NewBlockStore(logger), nil } + +func newManilaFSStore(logger logrus.FieldLogger) (interface{}, error) { + return manila.NewFSStore(logger), nil +} diff --git a/src/cinder/block_store.go b/src/cinder/block_store.go index df1dcc0..2053c97 100644 --- a/src/cinder/block_store.go +++ b/src/cinder/block_store.go @@ -17,6 +17,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +const ( + snapshotReadyTimeout = 300 +) + // BlockStore is a plugin for containing state for the Cinder Block Storage type BlockStore struct { client *gophercloud.ServiceClient @@ -76,7 +80,6 @@ func (b *BlockStore) Init(config map[string]string) error { // availability zone, initialized from the provided snapshot and with the specified type. // IOPS is ignored as it is not used in Cinder. func (b *BlockStore) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (string, error) { - snapshotReadyTimeout := 300 logWithFields := b.log.WithFields(logrus.Fields{ "snapshotID": snapshotID, "volumeType": volumeType, @@ -108,17 +111,16 @@ func (b *BlockStore) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ s SnapshotID: snapshotID, } - var cinderVolume *volumes.Volume - cinderVolume, err = volumes.Create(b.client, opts).Extract() + volume, err := volumes.Create(b.client, opts).Extract() if err != nil { logWithFields.Error("failed to create volume from snapshot") return "", fmt.Errorf("failed to create volume %v from snapshot %v: %w", volumeName, snapshotID, err) } logWithFields.WithFields(logrus.Fields{ - "cinderVolumeID": cinderVolume.ID, + "volumeID": volume.ID, }).Info("Backup volume was created") - return cinderVolume.ID, nil + return volume.ID, nil } // GetVolumeInfo returns type of the specified volume in the given availability zone. 
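Reviewer note (illustrative, not part of the patch): the hunk above now extracts the created volume directly instead of pre-declaring a variable. For readers unfamiliar with gophercloud, here is a minimal, self-contained sketch of the same create-from-snapshot call; the function name and parameters are hypothetical, and an already-authenticated block-storage v3 client is assumed.

package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes"
)

// createFromSnapshot creates a new volume seeded from an existing snapshot.
func createFromSnapshot(client *gophercloud.ServiceClient, snapshotID, name, az string) (string, error) {
	opts := volumes.CreateOpts{
		Name:             name,
		AvailabilityZone: az,
		SnapshotID:       snapshotID,
	}

	// volumes.Create returns a CreateResult; Extract unmarshals the volume body.
	volume, err := volumes.Create(client, opts).Extract()
	if err != nil {
		return "", fmt.Errorf("failed to create volume %v from snapshot %v: %w", name, snapshotID, err)
	}
	return volume.ID, nil
}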
@@ -148,7 +150,7 @@ func (b *BlockStore) IsVolumeReady(volumeID, volumeAZ string) (ready bool, err e logWithFields.Info("BlockStore.IsVolumeReady called") // Get volume object from Cinder - cinderVolume, err := volumes.Get(b.client, volumeID).Extract() + volume, err := volumes.Get(b.client, volumeID).Extract() if err != nil { logWithFields.Error("failed to get volume from cinder") return false, fmt.Errorf("failed to get volume %v from cinder: %w", volumeID, err) @@ -156,12 +158,12 @@ func (b *BlockStore) IsVolumeReady(volumeID, volumeAZ string) (ready bool, err e // Ready states: // https://github.com/openstack/cinder/blob/master/api-ref/source/v3/volumes-v3-volumes.inc#volumes-volumes - if cinderVolume.Status == "available" || cinderVolume.Status == "in-use" { + if volume.Status == "available" || volume.Status == "in-use" { return true, nil } // Volume is not in one of the "ready" states - return false, fmt.Errorf("volume %v is not in ready state, the status is %v", volumeID, cinderVolume.Status) + return false, fmt.Errorf("volume %v is not in ready state, the status is %v", volumeID, volume.Status) } // CreateSnapshot creates a snapshot of the specified volume, and applies any provided @@ -169,10 +171,11 @@ func (b *BlockStore) IsVolumeReady(volumeID, volumeAZ string) (ready bool, err e func (b *BlockStore) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (string, error) { snapshotName := fmt.Sprintf("%s.snap.%s", volumeID, strconv.FormatUint(utils.Rand.Uint64(), 10)) logWithFields := b.log.WithFields(logrus.Fields{ - "snapshotName": snapshotName, - "volumeID": volumeID, - "volumeAZ": volumeAZ, - "tags": tags, + "snapshotName": snapshotName, + "volumeID": volumeID, + "volumeAZ": volumeAZ, + "tags": tags, + "snapshotReadyTimeout": snapshotReadyTimeout, }) logWithFields.Info("BlockStore.CreateSnapshot called") @@ -184,17 +187,23 @@ func (b *BlockStore) CreateSnapshot(volumeID, volumeAZ string, tags map[string]s Force: true, } - // Note: we will wait for snapshot to be in ready state in CreateVolumeForSnapshot() - createResult, err := snapshots.Create(b.client, opts).Extract() + snapshot, err := snapshots.Create(b.client, opts).Extract() if err != nil { + logWithFields.Error("failed to create snapshot from volume") return "", fmt.Errorf("failed to create snapshot %v from volume %v: %w", snapshotName, volumeID, err) } - snapshotID := createResult.ID + + err = snapshots.WaitForStatus(b.client, snapshot.ID, "available", snapshotReadyTimeout) + if err != nil { + logWithFields.Error("snapshot didn't get into 'available' state within the time limit") + return "", fmt.Errorf("snapshot %v didn't get into 'available' state within the time limit: %w", snapshot.ID, err) + } + logWithFields.Info("Snapshot is in 'available' state") logWithFields.WithFields(logrus.Fields{ - "snapshotID": snapshotID, + "snapshotID": snapshot.ID, }).Info("Snapshot finished successfuly") - return snapshotID, nil + return snapshot.ID, nil } // DeleteSnapshot deletes the specified volume snapshot. 
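Reviewer note (illustrative, not part of the patch): with this change, CreateSnapshot blocks until the snapshot reaches 'available' via snapshots.WaitForStatus, bounded by the 300-second snapshotReadyTimeout constant. The same wait can be expressed with gophercloud's generic poller, which is the pattern the new Manila store uses (see waitForSnapshotStatus later in this diff). A standalone sketch, assuming an authenticated client; the function name is hypothetical.

package main

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots"
)

// waitForSnapshotStatus polls the snapshot until it reaches the wanted status
// or the timeout (in seconds) elapses.
func waitForSnapshotStatus(client *gophercloud.ServiceClient, id, status string, secs int) error {
	return gophercloud.WaitFor(secs, func() (bool, error) {
		snapshot, err := snapshots.Get(client, id).Extract()
		if err != nil {
			return false, err
		}
		return snapshot.Status == status, nil
	})
}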
@@ -207,6 +216,11 @@ func (b *BlockStore) DeleteSnapshot(snapshotID string) error { // Delete snapshot from Cinder err := snapshots.Delete(b.client, snapshotID).ExtractErr() if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + logWithFields.Info("snapshot is already deleted") + return nil + } + logWithFields.Error("failed to delete snapshot") return fmt.Errorf("failed to delete snapshot %v: %w", snapshotID, err) } @@ -225,16 +239,21 @@ func (b *BlockStore) GetVolumeID(unstructuredPV runtime.Unstructured) (string, e return "", fmt.Errorf("failed to convert from unstructured PV: %w", err) } - var volumeID string if pv.Spec.Cinder != nil { - volumeID = pv.Spec.Cinder.VolumeID - } else if pv.Spec.CSI.Driver == "cinder.csi.openstack.org" || pv.Spec.CSI.Driver == "disk.csi.everest.io" { - volumeID = pv.Spec.CSI.VolumeHandle - } else { - return "", fmt.Errorf("persistent volume is missing 'spec.cinder.volumeID' or PV driver ('spec.csi.driver') doesn't match supported drivers(cinder.csi.openstack.org, disk.csi.everest.io)") + return pv.Spec.Cinder.VolumeID, nil } - return volumeID, nil + if pv.Spec.CSI == nil { + return "", nil + } + + if pv.Spec.CSI.Driver == "cinder.csi.openstack.org" || pv.Spec.CSI.Driver == "disk.csi.everest.io" { + return pv.Spec.CSI.VolumeHandle, nil + } + + b.log.Infof("Unable to handle CSI driver: %s", pv.Spec.CSI.Driver) + + return "", nil } // SetVolumeID sets the specific identifier for the PersistentVolume. @@ -252,7 +271,7 @@ func (b *BlockStore) SetVolumeID(unstructuredPV runtime.Unstructured, volumeID s if pv.Spec.Cinder != nil { pv.Spec.Cinder.VolumeID = volumeID - } else if pv.Spec.CSI.Driver == "cinder.csi.openstack.org" || pv.Spec.CSI.Driver == "disk.csi.everest.io" { + } else if pv.Spec.CSI != nil && (pv.Spec.CSI.Driver == "cinder.csi.openstack.org" || pv.Spec.CSI.Driver == "disk.csi.everest.io") { pv.Spec.CSI.VolumeHandle = volumeID } else { return nil, fmt.Errorf("persistent volume is missing 'spec.cinder.volumeID' or PV driver ('spec.csi.driver') doesn't match supported drivers(cinder.csi.openstack.org, disk.csi.everest.io)") diff --git a/src/manila/fs_store.go b/src/manila/fs_store.go new file mode 100644 index 0000000..c01b581 --- /dev/null +++ b/src/manila/fs_store.go @@ -0,0 +1,429 @@ +package manila + +import ( + "fmt" + "os" + "strconv" + + "github.com/Lirt/velero-plugin-for-openstack/src/utils" + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions" + "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shareaccessrules" + "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares" + "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots" + "github.com/sirupsen/logrus" + velerovolumesnapshotter "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + defaultCsiManilaDriverName = "nfs.manila.csi.openstack.org" + minSupportedMicroversion = "2.7" + getAccessRulesMicroversion = "2.45" + shareReadyTimeout = 300 + snapshotReadyTimeout = 300 +) + +// FSStore is a plugin for containing state for the Manila Shared Filesystem +type FSStore struct { + client *gophercloud.ServiceClient + provider *gophercloud.ProviderClient + config map[string]string + log logrus.FieldLogger +} + +// NewFSStore instantiates a Manila Shared Filesystem 
Snapshotter.
+func NewFSStore(log logrus.FieldLogger) *FSStore {
+	return &FSStore{
+		log: log,
+	}
+}
+
+var _ velerovolumesnapshotter.VolumeSnapshotter = (*FSStore)(nil)
+
+// Init prepares the Manila VolumeSnapshotter for usage using the provided map of
+// configuration key-value pairs. It returns an error if the VolumeSnapshotter
+// cannot be initialized from the provided config.
+func (b *FSStore) Init(config map[string]string) error {
+	b.log.WithFields(logrus.Fields{
+		"config": config,
+	}).Info("FSStore.Init called")
+	b.config = config
+
+	// set default Manila CSI driver name
+	b.config["driver"] = utils.GetConf(b.config, "driver", defaultCsiManilaDriverName)
+
+	// Authenticate to OpenStack
+	err := utils.Authenticate(&b.provider, "manila", config, b.log)
+	if err != nil {
+		return fmt.Errorf("failed to authenticate against OpenStack in shared filesystem plugin: %w", err)
+	}
+
+	// If we haven't set the client before, or we use multiple clouds, get a new client
+	if b.client == nil || config["cloud"] != "" {
+		region, ok := os.LookupEnv("OS_REGION_NAME")
+		if !ok {
+			if config["region"] != "" {
+				region = config["region"]
+			} else {
+				region = "RegionOne"
+			}
+		}
+		b.client, err = openstack.NewSharedFileSystemV2(b.provider, gophercloud.EndpointOpts{
+			Region: region,
+		})
+		if err != nil {
+			return fmt.Errorf("failed to create manila storage client: %w", err)
+		}
+
+		logWithFields := b.log.WithFields(logrus.Fields{
+			"endpoint": b.client.Endpoint,
+			"region":   region,
+		})
+
+		// set minimum supported Manila API microversion by default
+		b.client.Microversion = minSupportedMicroversion
+		if mv, err := b.getManilaMicroversion(); err != nil {
+			logWithFields.Warningf("Failed to obtain supported Manila microversions (using the default one: %v): %v", b.client.Microversion, err)
+		} else {
+			ok, err := utils.CompareMicroversions("lte", getAccessRulesMicroversion, mv)
+			if err != nil {
+				logWithFields.Warningf("Failed to compare supported Manila microversions (using the default one: %v): %v", b.client.Microversion, err)
+			}
+
+			if ok {
+				b.client.Microversion = getAccessRulesMicroversion
+				logWithFields.Infof("Setting the supported microversion to %v", b.client.Microversion)
+			}
+		}
+
+		logWithFields.Info("Successfully created shared filesystem service client")
+	}
+
+	return nil
+}
+
+// CreateVolumeFromSnapshot creates a new volume in the specified
+// availability zone, initialized from the provided snapshot and with the specified type.
+// IOPS is ignored as it is not used in Manila.
+func (b *FSStore) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (string, error) {
+	logWithFields := b.log.WithFields(logrus.Fields{
+		"snapshotID":           snapshotID,
+		"volumeType":           volumeType,
+		"volumeAZ":             volumeAZ,
+		"shareReadyTimeout":    shareReadyTimeout,
+		"snapshotReadyTimeout": snapshotReadyTimeout,
+	})
+	logWithFields.Info("FSStore.CreateVolumeFromSnapshot called")
+
+	volumeName := fmt.Sprintf("%s.backup.%s", snapshotID, strconv.FormatUint(utils.Rand.Uint64(), 10))
+	// Make sure the snapshot is in 'available' status
+	// Possible values for snapshot status:
+	// https://github.com/openstack/manila/blob/master/api-ref/source/snapshots.inc#share-snapshots
+	logWithFields.Info("Waiting for snapshot to be in 'available' status")
+
+	snapshot, err := b.waitForSnapshotStatus(snapshotID, "available", snapshotReadyTimeout)
+	if err != nil {
+		logWithFields.Error("snapshot didn't get into 'available' status within the time limit")
+		return "", fmt.Errorf("snapshot %v didn't get into 'available' status within the time limit: %w", snapshotID, err)
+	}
+	logWithFields.Info("Snapshot is in 'available' status")
+
+	// get the original share with its metadata
+	originShare, err := shares.Get(b.client, snapshot.ShareID).Extract()
+	if err != nil {
+		logWithFields.Errorf("failed to get original share %v from manila", snapshot.ShareID)
+		return "", fmt.Errorf("failed to get original share %v from manila: %w", snapshot.ShareID, err)
+	}
+
+	// get the original share access rule
+	rule, err := b.getShareAccessRule(logWithFields, snapshot.ShareID)
+	if err != nil {
+		return "", err
+	}
+
+	// Create a Manila share from the snapshot (backup)
+	logWithFields.Info("Starting to create share from snapshot")
+	opts := &shares.CreateOpts{
+		ShareProto:       snapshot.ShareProto,
+		Size:             snapshot.Size,
+		AvailabilityZone: volumeAZ,
+		Name:             volumeName,
+		SnapshotID:       snapshotID,
+		Metadata:         originShare.Metadata,
+	}
+	share, err := shares.Create(b.client, opts).Extract()
+	if err != nil {
+		logWithFields.Error("failed to create share from snapshot")
+		return "", fmt.Errorf("failed to create share %v from snapshot %v: %w", volumeName, snapshotID, err)
+	}
+
+	// Make sure the share is in 'available' status
+	// Possible values for share status:
+	// https://github.com/openstack/manila/blob/master/api-ref/source/shares.inc#shares
+	logWithFields.Info("Waiting for share to be in 'available' status")
+
+	_, err = b.waitForShareStatus(share.ID, "available", shareReadyTimeout)
+	if err != nil {
+		logWithFields.Error("share didn't get into 'available' status within the time limit")
+		return "", fmt.Errorf("share %v didn't get into 'available' status within the time limit: %w", share.ID, err)
+	}
+
+	// grant the single supported access rule, copied from the original share
+	accessOpts := &shares.GrantAccessOpts{
+		AccessType:  rule.AccessType,
+		AccessTo:    rule.AccessTo,
+		AccessLevel: rule.AccessLevel,
+	}
+	shareAccess, err := shares.GrantAccess(b.client, share.ID, accessOpts).Extract()
+	if err != nil {
+		logWithFields.Error("failed to grant access to manila share")
+		return "", fmt.Errorf("failed to grant access to manila share %v: %w", share.ID, err)
+	}
+
+	logWithFields.WithFields(logrus.Fields{
+		"shareID":       share.ID,
+		"shareAccessID": shareAccess.ID,
+	}).Info("Backup share was created")
+	return share.ID, nil
+}
+
+// GetVolumeInfo returns type of the specified volume in the given availability zone.
+// IOPS is not used as it is not supported by Manila.
+func (b *FSStore) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) {
+	logWithFields := b.log.WithFields(logrus.Fields{
+		"volumeID": volumeID,
+		"volumeAZ": volumeAZ,
+	})
+	logWithFields.Info("FSStore.GetVolumeInfo called")
+
+	share, err := shares.Get(b.client, volumeID).Extract()
+	if err != nil {
+		logWithFields.Error("failed to get share from manila")
+		return "", nil, fmt.Errorf("failed to get share %v from manila: %w", volumeID, err)
+	}
+
+	return share.VolumeType, nil, nil
+}
+
+// IsVolumeReady checks if the volume is in one of the available statuses.
+func (b *FSStore) IsVolumeReady(volumeID, volumeAZ string) (ready bool, err error) {
+	logWithFields := b.log.WithFields(logrus.Fields{
+		"volumeID": volumeID,
+		"volumeAZ": volumeAZ,
+	})
+	logWithFields.Info("FSStore.IsVolumeReady called")
+
+	// Get share object from Manila
+	share, err := shares.Get(b.client, volumeID).Extract()
+	if err != nil {
+		logWithFields.Error("failed to get share from manila")
+		return false, fmt.Errorf("failed to get share %v from manila: %w", volumeID, err)
+	}
+
+	// Ready statuses:
+	// https://github.com/openstack/manila/blob/master/api-ref/source/shares.inc#shares
+	if share.Status == "available" {
+		return true, nil
+	}
+
+	// Share is not in one of the "available" statuses
+	return false, fmt.Errorf("share %v is not in available status, the status is %v", volumeID, share.Status)
+}
+
+// CreateSnapshot creates a snapshot of the specified volume, and does NOT
+// apply any provided set of tags to the snapshot.
+func (b *FSStore) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (string, error) {
+	snapshotName := fmt.Sprintf("%s.snap.%s", volumeID, strconv.FormatUint(utils.Rand.Uint64(), 10))
+	logWithFields := b.log.WithFields(logrus.Fields{
+		"snapshotName":         snapshotName,
+		"volumeID":             volumeID,
+		"volumeAZ":             volumeAZ,
+		"tags":                 tags,
+		"snapshotReadyTimeout": snapshotReadyTimeout,
+	})
+	logWithFields.Info("FSStore.CreateSnapshot called")
+
+	opts := snapshots.CreateOpts{
+		Name:        snapshotName,
+		Description: "Velero snapshot",
+		ShareID:     volumeID,
+	}
+
+	snapshot, err := snapshots.Create(b.client, opts).Extract()
+	if err != nil {
+		logWithFields.Error("failed to create snapshot from share")
+		return "", fmt.Errorf("failed to create snapshot %v from share %v: %w", snapshotName, volumeID, err)
+	}
+
+	_, err = b.waitForSnapshotStatus(snapshot.ID, "available", snapshotReadyTimeout)
+	if err != nil {
+		logWithFields.Error("snapshot didn't get into 'available' status within the time limit")
+		return "", fmt.Errorf("snapshot %v didn't get into 'available' status within the time limit: %w", snapshot.ID, err)
+	}
+	logWithFields.Info("Snapshot is in 'available' status")
+
+	logWithFields.WithFields(logrus.Fields{
+		"snapshotID": snapshot.ID,
+	}).Info("Snapshot finished successfully")
+	return snapshot.ID, nil
+}
+
+// DeleteSnapshot deletes the specified volume snapshot.
+func (b *FSStore) DeleteSnapshot(snapshotID string) error {
+	logWithFields := b.log.WithFields(logrus.Fields{
+		"snapshotID": snapshotID,
+	})
+	logWithFields.Info("FSStore.DeleteSnapshot called")
+
+	// Delete snapshot from Manila
+	err := snapshots.Delete(b.client, snapshotID).ExtractErr()
+	if err != nil {
+		if _, ok := err.(gophercloud.ErrDefault404); ok {
+			logWithFields.Info("snapshot is already deleted")
+			return nil
+		}
+		logWithFields.Error("failed to delete snapshot")
+		return fmt.Errorf("failed to delete snapshot %v: %w", snapshotID, err)
+	}
+
+	return nil
+}
+
+// GetVolumeID returns the specific identifier for the PersistentVolume.
+func (b *FSStore) GetVolumeID(unstructuredPV runtime.Unstructured) (string, error) {
+	logWithFields := b.log.WithFields(logrus.Fields{
+		"unstructuredPV": unstructuredPV,
+	})
+	logWithFields.Info("FSStore.GetVolumeID called")
+
+	pv := new(v1.PersistentVolume)
+	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredPV.UnstructuredContent(), pv); err != nil {
+		return "", fmt.Errorf("failed to convert from unstructured PV: %w", err)
+	}
+
+	if pv.Spec.CSI == nil {
+		return "", nil
+	}
+
+	if pv.Spec.CSI.Driver == b.config["driver"] {
+		return pv.Spec.CSI.VolumeHandle, nil
+	}
+
+	b.log.Infof("Unable to handle CSI driver: %s", pv.Spec.CSI.Driver)
+
+	return "", nil
+}
+
+// SetVolumeID sets the specific identifier for the PersistentVolume.
+func (b *FSStore) SetVolumeID(unstructuredPV runtime.Unstructured, volumeID string) (runtime.Unstructured, error) {
+	logWithFields := b.log.WithFields(logrus.Fields{
+		"unstructuredPV": unstructuredPV,
+		"volumeID":       volumeID,
+	})
+	logWithFields.Info("FSStore.SetVolumeID called")
+
+	pv := new(v1.PersistentVolume)
+	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredPV.UnstructuredContent(), pv); err != nil {
+		return nil, fmt.Errorf("failed to convert from unstructured PV: %w", err)
+	}
+
+	if pv.Spec.CSI == nil || pv.Spec.CSI.Driver != b.config["driver"] {
+		return nil, fmt.Errorf("PV driver ('spec.csi.driver') doesn't match supported driver (%s)", b.config["driver"])
+	}
+
+	// get share access rule
+	rule, err := b.getShareAccessRule(logWithFields, volumeID)
+	if err != nil {
+		return nil, err
+	}
+
+	pv.Spec.CSI.VolumeHandle = volumeID
+	pv.Spec.CSI.VolumeAttributes["shareID"] = volumeID
+	pv.Spec.CSI.VolumeAttributes["shareAccessID"] = rule.ID
+
+	res, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pv)
+	if err != nil {
+		return nil, fmt.Errorf("failed to convert to unstructured PV: %w", err)
+	}
+
+	return &unstructured.Unstructured{Object: res}, nil
+}
+
+func (b *FSStore) getShareAccessRule(logWithFields *logrus.Entry, volumeID string) (*shares.AccessRight, error) {
+	var rules interface{}
+	var err error
+	// deprecated API call
+	if b.client.Microversion == minSupportedMicroversion {
+		rules, err = shares.ListAccessRights(b.client, volumeID).Extract()
+	} else {
+		rules, err = shareaccessrules.List(b.client, volumeID).Extract()
+	}
+	if err != nil {
+		logWithFields.Errorf("failed to list share %v access rules from manila", volumeID)
+		return nil, fmt.Errorf("failed to list share %v access rules from manila: %w", volumeID, err)
+	}
+
+	switch rules := rules.(type) {
+	case []shares.AccessRight:
+		for _, rule := range rules {
+			return &rule, nil
+		}
+	case []shareaccessrules.ShareAccess:
+		for _, rule := range rules {
+			return &shares.AccessRight{
+				ID:          rule.ID,
+				ShareID:     rule.ShareID,
+				AccessKey:   rule.AccessKey,
+				AccessLevel: rule.AccessLevel,
+				AccessTo:    rule.AccessTo,
+				AccessType:  rule.AccessType,
+				State:       rule.State,
+			}, nil
+		}
+	}
+
+	logWithFields.Errorf("failed to find share %v access rules from manila", volumeID)
+	return nil, fmt.Errorf("failed to find share %v access rules from manila", volumeID)
+}
+
+func (b *FSStore) getManilaMicroversion() (string, error) {
+	api, err := apiversions.Get(b.client, "v2").Extract()
+	if err != nil {
+		return "", err
+	}
+	return api.Version, nil
+}
+
+func (b *FSStore) waitForShareStatus(id, status string, secs int) (current *shares.Share, err error) {
+	return current, gophercloud.WaitFor(secs, func() (bool, error) {
+		current, err = shares.Get(b.client, id).Extract()
+		if err != nil {
+			return false, err
+		}
+
+		if current.Status == status {
+			return true, nil
+		}
+
+		return false, nil
+	})
+}
+
+func (b *FSStore) waitForSnapshotStatus(id, status string, secs int) (current *snapshots.Snapshot, err error) {
+	return current, gophercloud.WaitFor(secs, func() (bool, error) {
+		current, err = snapshots.Get(b.client, id).Extract()
+		if err != nil {
+			return false, err
+		}
+
+		if current.Status == status {
+			return true, nil
+		}
+
+		return false, nil
+	})
+}
diff --git a/src/swift/object_store.go b/src/swift/object_store.go
index c064ed4..3e19d99 100644
--- a/src/swift/object_store.go
+++ b/src/swift/object_store.go
@@ -225,13 +225,18 @@ func (o *ObjectStore) ListObjects(container, prefix string) ([]string, error) {
 
 // DeleteObject deletes object specified by object from container
 func (o *ObjectStore) DeleteObject(container, object string) error {
-	o.log.WithFields(logrus.Fields{
+	logWithFields := o.log.WithFields(logrus.Fields{
 		"container": container,
 		"object":    object,
-	}).Info("ObjectStore.DeleteObject called")
+	})
+	logWithFields.Info("ObjectStore.DeleteObject called")
 
 	_, err := objects.Delete(o.client, container, object, nil).Extract()
 	if err != nil {
+		if _, ok := err.(gophercloud.ErrDefault404); ok {
+			logWithFields.Info("object is already deleted")
+			return nil
+		}
 		return fmt.Errorf("failed to delete %q object from %q container: %w", object, container, err)
 	}
 
diff --git a/src/utils/helpers.go b/src/utils/helpers.go
index e9613ab..6e2c034 100644
--- a/src/utils/helpers.go
+++ b/src/utils/helpers.go
@@ -1,14 +1,21 @@
 package utils
 
 import (
+	"fmt"
 	"math/rand"
 	"os"
+	"regexp"
+	"strconv"
 	"strings"
 	"time"
 )
 
-// Rand is used for a random generator exclusively for this go module
-var Rand = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+var (
+	// Rand is used for a random generator exclusively for this go module
+	Rand = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+	// regexp to parse OpenStack service microversion
+	mvRe = regexp.MustCompile(`^(\d+)\.(\d+)$`)
+)
 
 // GetEnv gets value from environment variable or fallbacks to default value
 // This snippet is from https://stackoverflow.com/a/40326580/3323419
@@ -19,6 +26,14 @@ func GetEnv(key, fallback string) string {
 	return fallback
 }
 
+// GetConf gets a value from a config map or falls back to a default value
+func GetConf(config map[string]string, key, fallback string) string {
+	if value, ok := config[key]; ok && value != "" {
+		return value
+	}
+	return fallback
+}
+
 // ReplaceAccount replaces an endpoint account part with a new account value
 func ReplaceAccount(account, path string, prefixes []string) string {
 	parts := strings.Split(path, "/")
@@ -32,3 +47,52 @@ func ReplaceAccount(account, path string, prefixes []string) string {
 	}
 	return strings.Join(parts, "/")
 }
+
+// CompareMicroversions compares two
microversions using operators: +// lte: less than or equal +// gte: greater than or equal +func CompareMicroversions(operator, want, have string) (bool, error) { + if operator != "lte" && operator != "gte" { + return false, fmt.Errorf("invalid microversions comparison %q operator, must be lte or gte", operator) + } + + w, err := microversionToInt(want) + if err != nil { + return false, err + } + + h, err := microversionToInt(have) + if err != nil { + return false, err + } + + // lte + if operator == "lte" { + if w[0] < h[0] { + return true, nil + } + + return w[0] <= h[0] && w[1] <= h[1], nil + } + + // gte + if w[0] > h[0] { + return true, nil + } + + return w[0] >= h[0] && w[1] >= h[1], nil +} + +func microversionToInt(mv string) ([]int, error) { + res := mvRe.FindAllStringSubmatch(mv, -1) + if len(res) == 1 && len(res[0]) == 3 { + ver := res[0][1:] + major, _ := strconv.Atoi(ver[0]) + minor, _ := strconv.Atoi(ver[1]) + return []int{ + major, + minor, + }, nil + } + return nil, fmt.Errorf("invalid microversion string: %v", mv) +} diff --git a/src/utils/helpers_test.go b/src/utils/helpers_test.go index 3c3c4b9..2ab3cd0 100644 --- a/src/utils/helpers_test.go +++ b/src/utils/helpers_test.go @@ -62,3 +62,43 @@ func TestRand(t *testing.T) { t.Errorf("failed to verify random seed generator %v != %v", a, b) } } + +func TestCompareMicroversions(t *testing.T) { + type vals struct { + want string + op string + have string + res bool + } + tests := []vals{ + { + "2.7", "lte", "2.50", true, + }, + { + "1.7", "lte", "2.50", true, + }, + { + "3.7", "lte", "2.50", false, + }, + { + "2.50", "lte", "2.50", true, + }, + { + "2.7", "gte", "2.50", false, + }, + { + "1.50", "gte", "2.50", false, + }, + { + "2.50", "gte", "2.50", true, + }, + } + + for i, test := range tests { + if v, err := CompareMicroversions(test.op, test.want, test.have); err != nil { + t.Errorf("[%d] test failed: %v", i, err) + } else if test.res != v { + t.Errorf("[%d] test failed: expected %t, got %t", i, test.res, v) + } + } +} diff --git a/tests/actions/integration-tests/velero-helm-values.yaml b/tests/actions/integration-tests/velero-helm-values.yaml index c6998f4..def4d6c 100644 --- a/tests/actions/integration-tests/velero-helm-values.yaml +++ b/tests/actions/integration-tests/velero-helm-values.yaml @@ -10,6 +10,8 @@ configuration: volumeSnapshotLocation: - provider: community.openstack.org/openstack name: cinder + - provider: community.openstack.org/openstack-manila + name: manila initContainers: - name: velero-plugin-openstack image: ""
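End note (illustrative, not part of the patch): with both snapshot locations registered in the helm values above, a backup can select them explicitly, e.g. "velero backup create my-backup --volume-snapshot-locations cinder,manila". Below is a minimal usage sketch of the CompareMicroversions helper added in src/utils/helpers.go, mirroring the negotiation done in FSStore.Init; the "2.65" maximum is a made-up example value.

package main

import (
	"fmt"

	"github.com/Lirt/velero-plugin-for-openstack/src/utils"
)

func main() {
	// The access-rules API (microversion 2.45) is usable when it is <= the
	// maximum microversion reported by the Manila endpoint.
	ok, err := utils.CompareMicroversions("lte", "2.45", "2.65")
	if err != nil {
		panic(err)
	}
	fmt.Println(ok) // true
}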