Add support for Manila Shared Filesystem snapshots #73

Merged 5 commits on Jul 5, 2023
24 changes: 21 additions & 3 deletions .github/workflows/tests.yaml
@@ -33,6 +33,7 @@ jobs:
VELERO_BACKUP_NAME: my-test-backup-01
# From tag 1.25.0
CINDER_CSI_CHART_VERSION: 2.2.0
MANILA_CSI_CHART_VERSION: 2.2.0
DOCKER_IMAGE_NAME: velero-plugin-for-openstack
SWIFT_CONTAINER_NAME: my-swift-container
TESTS_DIRECTORY: tests/actions/integration-tests
@@ -76,7 +77,7 @@ jobs:
sudo mv "velero-v${VELERO_CLI_VERSION}-linux-amd64/velero" /usr/local/bin/velero
chmod 750 /usr/local/bin/velero
- name: Deploy DevStack
uses: EmilienM/devstack-action@v0.8
uses: EmilienM/devstack-action@v0.11
with:
branch: 'stable/yoga'
enable_workaround_docker_io: 'false'
@@ -86,8 +87,25 @@ jobs:
SWIFT_HASH=12394u39845623984j28hf93d9173
SWIFT_DEFAULT_BIND_PORT=15492
SERVICE_TIMEOUT=200
disable_all_services
enable_service key rabbit mysql s-proxy s-object s-container s-account c-bak c-api c-vol c-sch n-api n-crt n-cpu n-cond n-sch n-api-meta n-sproxy placement-api placement-client

CINDER_ISCSI_HELPER=tgtadm

enable_plugin manila https://github.com/openstack/manila stable/yoga
# LVM Backend config options
MANILA_SERVICE_IMAGE_ENABLED=False
SHARE_DRIVER=manila.share.drivers.lvm.LVMShareDriver
MANILA_ENABLED_BACKENDS=chicago,denver
MANILA_BACKEND1_CONFIG_GROUP_NAME=chicago
MANILA_BACKEND2_CONFIG_GROUP_NAME=denver
MANILA_SHARE_BACKEND1_NAME=CHICAGO
MANILA_SHARE_BACKEND2_NAME=DENVER
MANILA_OPTGROUP_chicago_driver_handles_share_servers=False
MANILA_OPTGROUP_denver_driver_handles_share_servers=False
SHARE_BACKING_FILE_SIZE=32000M
MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='snapshot_support=True create_share_from_snapshot_support=True revert_to_snapshot_support=True mount_snapshot_support=True'
MANILA_CONFIGURE_DEFAULT_TYPES=True
MANILA_INSTALL_TEMPEST_PLUGIN_SYSTEMWIDE=false
- name: Prepare Swift container for velero backups
run: |
source "${{ github.workspace }}/devstack/openrc"
@@ -113,7 +131,7 @@ jobs:
--wait-for-jobs
- name: Create test backup and validate it
run: |
# Create Cinder PVC
# Create Cinder/Manila PVC
# Checks
velero backup-location get
velero snapshot-location get
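For orientation, the "Create Cinder/Manila PVC" step above would create a claim along these lines. This is a minimal sketch: the `csi-manila-nfs` storage class name, claim name, and size are assumptions, not taken from this diff (the real manifests live in tests/actions/integration-tests).

```yaml
# Hypothetical test PVC for the Manila CSI driver deployed above.
# "csi-manila-nfs" is an assumed storage class name; the actual test
# manifests are outside this diff.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: manila-test-pvc
spec:
  accessModes:
    - ReadWriteMany            # Manila shares are typically mounted RWX
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-manila-nfs
```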
1 change: 1 addition & 0 deletions .gitignore
@@ -10,6 +10,7 @@

# Output of the go coverage tool, specifically when used with LiteIDE
*.out
.idea

# Dependency directories (remove the comment below to include it)
# vendor/
7 changes: 7 additions & 0 deletions README.md
@@ -276,8 +276,15 @@ configuration:
# # resticRepoPrefix: swift:<CONTAINER_NAME>:/<PATH>
# resticRepoPrefix: swift:my-awesome-container:/restic # Example
volumeSnapshotLocation:
# for Cinder block storage
- name: cinder
provider: community.openstack.org/openstack
# for Manila shared filesystem storage
- name: manila
provider: community.openstack.org/openstack-manila
config:
# optional Manila CSI driver name (default: nfs.manila.csi.openstack.org)
driver: ceph.manila.csi.openstack.org
initContainers:
- name: velero-plugin-openstack
image: lirt/velero-plugin-for-openstack:v0.5.2
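Rendered by the Velero Helm chart, the `volumeSnapshotLocation` entries above become `VolumeSnapshotLocation` objects roughly like the following. A sketch of the expected rendered output; the `velero` namespace assumes a default install.

```yaml
# Sketch of the object the Helm values above should render to.
apiVersion: velero.io/v1
kind: VolumeSnapshotLocation
metadata:
  name: manila
  namespace: velero            # assumed default install namespace
spec:
  provider: community.openstack.org/openstack-manila
  config:
    # optional Manila CSI driver name (default: nfs.manila.csi.openstack.org)
    driver: ceph.manila.csi.openstack.org
```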
6 changes: 6 additions & 0 deletions main.go
@@ -2,6 +2,7 @@ package main

import (
"github.com/Lirt/velero-plugin-for-openstack/src/cinder"
"github.com/Lirt/velero-plugin-for-openstack/src/manila"
"github.com/Lirt/velero-plugin-for-openstack/src/swift"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
@@ -13,6 +14,7 @@ func main() {
BindFlags(pflag.CommandLine).
RegisterObjectStore("community.openstack.org/openstack", newSwiftObjectStore).
RegisterVolumeSnapshotter("community.openstack.org/openstack", newCinderBlockStore).
RegisterVolumeSnapshotter("community.openstack.org/openstack-manila", newManilaFSStore).
Serve()
}

@@ -23,3 +25,7 @@ func newSwiftObjectStore(logger logrus.FieldLogger) (interface{}, error) {
func newCinderBlockStore(logger logrus.FieldLogger) (interface{}, error) {
return cinder.NewBlockStore(logger), nil
}

func newManilaFSStore(logger logrus.FieldLogger) (interface{}, error) {
return manila.NewFSStore(logger), nil
}
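The new `src/manila` package itself is not part of this excerpt. As a rough sketch, `NewFSStore` presumably returns an object implementing the same snapshotter method set as the Cinder `BlockStore` below; every name and body here is illustrative, not taken from the PR:

```go
// Hypothetical sketch of the new src/manila package. The actual file is
// not shown in this diff; this only illustrates the expected shape.
package manila

import (
	"github.com/gophercloud/gophercloud"
	"github.com/sirupsen/logrus"
)

// FSStore holds state for the Manila shared-filesystem snapshotter.
type FSStore struct {
	client *gophercloud.ServiceClient
	driver string
	log    logrus.FieldLogger
}

// NewFSStore builds an FSStore around the given logger.
func NewFSStore(log logrus.FieldLogger) *FSStore {
	return &FSStore{log: log}
}

// Init reads the volumeSnapshotLocation config. Per the README hunk
// above, the optional "driver" key defaults to nfs.manila.csi.openstack.org.
// Authentication against the Manila endpoint would also happen here,
// analogous to BlockStore.Init in src/cinder/block_store.go.
func (f *FSStore) Init(config map[string]string) error {
	f.driver = config["driver"]
	if f.driver == "" {
		f.driver = "nfs.manila.csi.openstack.org"
	}
	return nil
}

// The remaining methods (CreateSnapshot, DeleteSnapshot,
// CreateVolumeFromSnapshot, GetVolumeInfo, IsVolumeReady, GetVolumeID,
// SetVolumeID) would mirror the Cinder implementations shown below,
// operating on Manila shares and share snapshots instead of volumes.
```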
69 changes: 44 additions & 25 deletions src/cinder/block_store.go
@@ -17,6 +17,10 @@ import (
"k8s.io/apimachinery/pkg/runtime"
)

const (
snapshotReadyTimeout = 300
)

// BlockStore is a plugin for containing state for the Cinder Block Storage
type BlockStore struct {
client *gophercloud.ServiceClient
@@ -76,7 +80,6 @@ func (b *BlockStore) Init(config map[string]string) error {
// availability zone, initialized from the provided snapshot and with the specified type.
// IOPS is ignored as it is not used in Cinder.
func (b *BlockStore) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (string, error) {
snapshotReadyTimeout := 300
logWithFields := b.log.WithFields(logrus.Fields{
"snapshotID": snapshotID,
"volumeType": volumeType,
@@ -108,17 +111,16 @@ func (b *BlockStore) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ s
SnapshotID: snapshotID,
}

var cinderVolume *volumes.Volume
cinderVolume, err = volumes.Create(b.client, opts).Extract()
volume, err := volumes.Create(b.client, opts).Extract()
if err != nil {
logWithFields.Error("failed to create volume from snapshot")
return "", fmt.Errorf("failed to create volume %v from snapshot %v: %w", volumeName, snapshotID, err)
}

logWithFields.WithFields(logrus.Fields{
"cinderVolumeID": cinderVolume.ID,
"volumeID": volume.ID,
}).Info("Backup volume was created")
return cinderVolume.ID, nil
return volume.ID, nil
}

// GetVolumeInfo returns type of the specified volume in the given availability zone.
@@ -148,31 +150,32 @@ func (b *BlockStore) IsVolumeReady(volumeID, volumeAZ string) (ready bool, err e
logWithFields.Info("BlockStore.IsVolumeReady called")

// Get volume object from Cinder
cinderVolume, err := volumes.Get(b.client, volumeID).Extract()
volume, err := volumes.Get(b.client, volumeID).Extract()
if err != nil {
logWithFields.Error("failed to get volume from cinder")
return false, fmt.Errorf("failed to get volume %v from cinder: %w", volumeID, err)
}

// Ready states:
// https://github.com/openstack/cinder/blob/master/api-ref/source/v3/volumes-v3-volumes.inc#volumes-volumes
if cinderVolume.Status == "available" || cinderVolume.Status == "in-use" {
if volume.Status == "available" || volume.Status == "in-use" {
return true, nil
}

// Volume is not in one of the "ready" states
return false, fmt.Errorf("volume %v is not in ready state, the status is %v", volumeID, cinderVolume.Status)
return false, fmt.Errorf("volume %v is not in ready state, the status is %v", volumeID, volume.Status)
}

// CreateSnapshot creates a snapshot of the specified volume, and applies any provided
// set of tags to the snapshot.
func (b *BlockStore) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (string, error) {
snapshotName := fmt.Sprintf("%s.snap.%s", volumeID, strconv.FormatUint(utils.Rand.Uint64(), 10))
logWithFields := b.log.WithFields(logrus.Fields{
"snapshotName": snapshotName,
"volumeID": volumeID,
"volumeAZ": volumeAZ,
"tags": tags,
"snapshotName": snapshotName,
"volumeID": volumeID,
"volumeAZ": volumeAZ,
"tags": tags,
"snapshotReadyTimeout": snapshotReadyTimeout,
})
logWithFields.Info("BlockStore.CreateSnapshot called")

@@ -184,17 +187,23 @@ func (b *BlockStore) CreateSnapshot(volumeID, volumeAZ string, tags map[string]s
Force: true,
}

// Note: we will wait for snapshot to be in ready state in CreateVolumeForSnapshot()
createResult, err := snapshots.Create(b.client, opts).Extract()
snapshot, err := snapshots.Create(b.client, opts).Extract()
if err != nil {
logWithFields.Error("failed to create snapshot from volume")
return "", fmt.Errorf("failed to create snapshot %v from volume %v: %w", snapshotName, volumeID, err)
}
snapshotID := createResult.ID

err = snapshots.WaitForStatus(b.client, snapshot.ID, "available", snapshotReadyTimeout)
if err != nil {
logWithFields.Error("snapshot didn't get into 'available' state within the time limit")
return "", fmt.Errorf("snapshot %v didn't get into 'available' state within the time limit: %w", snapshot.ID, err)
}
logWithFields.Info("Snapshot is in 'available' state")

logWithFields.WithFields(logrus.Fields{
"snapshotID": snapshotID,
"snapshotID": snapshot.ID,
}).Info("Snapshot finished successfully")
return snapshotID, nil
return snapshot.ID, nil
}

// DeleteSnapshot deletes the specified volume snapshot.
@@ -207,6 +216,11 @@ func (b *BlockStore) DeleteSnapshot(snapshotID string) error {
// Delete snapshot from Cinder
err := snapshots.Delete(b.client, snapshotID).ExtractErr()
if err != nil {
if _, ok := err.(gophercloud.ErrDefault404); ok {
logWithFields.Info("snapshot is already deleted")
return nil
}
logWithFields.Error("failed to delete snapshot")
return fmt.Errorf("failed to delete snapshot %v: %w", snapshotID, err)
}

@@ -225,16 +239,21 @@ func (b *BlockStore) GetVolumeID(unstructuredPV runtime.Unstructured) (string, e
return "", fmt.Errorf("failed to convert from unstructured PV: %w", err)
}

var volumeID string
if pv.Spec.Cinder != nil {
volumeID = pv.Spec.Cinder.VolumeID
} else if pv.Spec.CSI.Driver == "cinder.csi.openstack.org" || pv.Spec.CSI.Driver == "disk.csi.everest.io" {
volumeID = pv.Spec.CSI.VolumeHandle
} else {
return "", fmt.Errorf("persistent volume is missing 'spec.cinder.volumeID' or PV driver ('spec.csi.driver') doesn't match supported drivers(cinder.csi.openstack.org, disk.csi.everest.io)")
return pv.Spec.Cinder.VolumeID, nil
}

return volumeID, nil
if pv.Spec.CSI == nil {
return "", nil
}

if pv.Spec.CSI.Driver == "cinder.csi.openstack.org" || pv.Spec.CSI.Driver == "disk.csi.everest.io" {
return pv.Spec.CSI.VolumeHandle, nil
}

b.log.Infof("Unable to handle CSI driver: %s", pv.Spec.CSI.Driver)

return "", nil
}

// SetVolumeID sets the specific identifier for the PersistentVolume.
@@ -252,7 +271,7 @@ func (b *BlockStore) SetVolumeID(unstructuredPV runtime.Unstructured, volumeID s

if pv.Spec.Cinder != nil {
pv.Spec.Cinder.VolumeID = volumeID
} else if pv.Spec.CSI.Driver == "cinder.csi.openstack.org" || pv.Spec.CSI.Driver == "disk.csi.everest.io" {
} else if pv.Spec.CSI != nil && (pv.Spec.CSI.Driver == "cinder.csi.openstack.org" || pv.Spec.CSI.Driver == "disk.csi.everest.io") {
pv.Spec.CSI.VolumeHandle = volumeID
} else {
return nil, fmt.Errorf("persistent volume is missing 'spec.cinder.volumeID' or PV driver ('spec.csi.driver') doesn't match supported drivers(cinder.csi.openstack.org, disk.csi.everest.io)")