Skip to content

Commit

Permalink
Merge pull request #7087 from blackpiglet/cherry_pick_7061_to_1.12
Browse files Browse the repository at this point in the history
[Cherry-pick][release-1.12]Add DataUpload Result and CSI VolumeSnapshot check for restore PV.
  • Loading branch information
qiuming-best authored Nov 13, 2023
2 parents d139922 + 5923046 commit 86e34ee
Show file tree
Hide file tree
Showing 8 changed files with 375 additions and 8 deletions.
1 change: 1 addition & 0 deletions changelogs/unreleased/7087-blackpiglet
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Add DataUpload Result and CSI VolumeSnapshot check for restore PV.
5 changes: 5 additions & 0 deletions pkg/builder/volume_snapshot_builder.go
Original file line number Diff line number Diff line change
Expand Up @@ -67,3 +67,8 @@ func (v *VolumeSnapshotBuilder) BoundVolumeSnapshotContentName(vscName string) *
v.object.Status.BoundVolumeSnapshotContentName = &vscName
return v
}

// SourcePVC sets the PersistentVolumeClaim name in the VolumeSnapshot's
// source spec and returns the builder for chaining.
func (v *VolumeSnapshotBuilder) SourcePVC(name string) *VolumeSnapshotBuilder {
	pvcName := name
	v.object.Spec.Source.PersistentVolumeClaimName = &pvcName
	return v
}
6 changes: 6 additions & 0 deletions pkg/controller/restore_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -515,6 +515,11 @@ func (r *restoreReconciler) runValidatedRestore(restore *api.Restore, info backu
return errors.Wrap(err, "error fetching volume snapshots metadata")
}

csiVolumeSnapshots, err := backupStore.GetCSIVolumeSnapshots(restore.Spec.BackupName)
if err != nil {
return errors.Wrap(err, "fail to fetch CSI VolumeSnapshots metadata")
}

restoreLog.Info("starting restore")

var podVolumeBackups []*api.PodVolumeBackup
Expand All @@ -531,6 +536,7 @@ func (r *restoreReconciler) runValidatedRestore(restore *api.Restore, info backu
BackupReader: backupFile,
ResourceModifiers: resourceModifiers,
DisableInformerCache: r.disableInformerCache,
CSIVolumeSnapshots: csiVolumeSnapshots,
}
restoreWarnings, restoreErrors := r.restorer.RestoreWithResolvers(restoreReq, actionsResolver, pluginManager)

Expand Down
2 changes: 2 additions & 0 deletions pkg/controller/restore_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ import (
"testing"
"time"

snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
Expand Down Expand Up @@ -471,6 +472,7 @@ func TestRestoreReconcile(t *testing.T) {
}
if test.expectedRestorerCall != nil {
backupStore.On("GetBackupContents", test.backup.Name).Return(io.NopCloser(bytes.NewReader([]byte("hello world"))), nil)
backupStore.On("GetCSIVolumeSnapshots", test.backup.Name).Return([]*snapshotv1api.VolumeSnapshot{}, nil)

restorer.On("RestoreWithResolvers", mock.Anything, mock.Anything, mock.Anything, mock.Anything,
mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(warnings, errors)
Expand Down
2 changes: 2 additions & 0 deletions pkg/restore/request.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import (
"io"
"sort"

snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/runtime"

Expand Down Expand Up @@ -60,6 +61,7 @@ type Request struct {
itemOperationsList *[]*itemoperation.RestoreOperation
ResourceModifiers *resourcemodifiers.ResourceModifiers
DisableInformerCache bool
CSIVolumeSnapshots []*snapshotv1api.VolumeSnapshot
}

type restoredItemStatus struct {
Expand Down
94 changes: 91 additions & 3 deletions pkg/restore/restore.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ import (
"time"

"github.com/google/uuid"
snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
Expand Down Expand Up @@ -298,6 +299,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers(
pvsToProvision: sets.NewString(),
pvRestorer: pvRestorer,
volumeSnapshots: req.VolumeSnapshots,
csiVolumeSnapshots: req.CSIVolumeSnapshots,
podVolumeBackups: req.PodVolumeBackups,
resourceTerminatingTimeout: kr.resourceTerminatingTimeout,
resourceTimeout: kr.resourceTimeout,
Expand Down Expand Up @@ -347,6 +349,7 @@ type restoreContext struct {
pvsToProvision sets.String
pvRestorer PVRestorer
volumeSnapshots []*volume.Snapshot
csiVolumeSnapshots []*snapshotv1api.VolumeSnapshot
podVolumeBackups []*velerov1api.PodVolumeBackup
resourceTerminatingTimeout time.Duration
resourceTimeout time.Duration
Expand Down Expand Up @@ -1287,23 +1290,59 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
}

case hasPodVolumeBackup(obj, ctx):
ctx.log.Infof("Dynamically re-provisioning persistent volume because it has a pod volume backup to be restored.")
ctx.log.WithFields(logrus.Fields{
"namespace": obj.GetNamespace(),
"name": obj.GetName(),
"groupResource": groupResource.String(),
}).Infof("Dynamically re-provisioning persistent volume because it has a pod volume backup to be restored.")
ctx.pvsToProvision.Insert(name)

// Return early because we don't want to restore the PV itself, we
// want to dynamically re-provision it.
return warnings, errs, itemExists

case hasCSIVolumeSnapshot(ctx, obj):
ctx.log.WithFields(logrus.Fields{
"namespace": obj.GetNamespace(),
"name": obj.GetName(),
"groupResource": groupResource.String(),
}).Infof("Dynamically re-provisioning persistent volume because it has a related CSI VolumeSnapshot.")
ctx.pvsToProvision.Insert(name)

// Return early because we don't want to restore the PV itself, we
// want to dynamically re-provision it.
return warnings, errs, itemExists

case hasSnapshotDataUpload(ctx, obj):
ctx.log.WithFields(logrus.Fields{
"namespace": obj.GetNamespace(),
"name": obj.GetName(),
"groupResource": groupResource.String(),
}).Infof("Dynamically re-provisioning persistent volume because it has a related snapshot DataUpload.")
ctx.pvsToProvision.Insert(name)

// Return early because we don't want to restore the PV itself, we
// want to dynamically re-provision it.
return warnings, errs, itemExists

case hasDeleteReclaimPolicy(obj.Object):
ctx.log.Infof("Dynamically re-provisioning persistent volume because it doesn't have a snapshot and its reclaim policy is Delete.")
ctx.log.WithFields(logrus.Fields{
"namespace": obj.GetNamespace(),
"name": obj.GetName(),
"groupResource": groupResource.String(),
}).Infof("Dynamically re-provisioning persistent volume because it doesn't have a snapshot and its reclaim policy is Delete.")
ctx.pvsToProvision.Insert(name)

// Return early because we don't want to restore the PV itself, we
// want to dynamically re-provision it.
return warnings, errs, itemExists

default:
ctx.log.Infof("Restoring persistent volume as-is because it doesn't have a snapshot and its reclaim policy is not Delete.")
ctx.log.WithFields(logrus.Fields{
"namespace": obj.GetNamespace(),
"name": obj.GetName(),
"groupResource": groupResource.String(),
}).Infof("Restoring persistent volume as-is because it doesn't have a snapshot and its reclaim policy is not Delete.")

// Check to see if the claimRef.namespace field needs to be remapped, and do so if necessary.
_, err = remapClaimRefNS(ctx, obj)
Expand Down Expand Up @@ -1929,6 +1968,55 @@ func hasSnapshot(pvName string, snapshots []*volume.Snapshot) bool {
return false
}

// hasCSIVolumeSnapshot reports whether the given PV's bound PVC matches the
// source PVC of any CSI VolumeSnapshot collected for this restore. A match
// means the PV should be dynamically re-provisioned from the snapshot rather
// than restored as-is.
//
// Returns false (and logs a warning) if the unstructured object cannot be
// converted to a PersistentVolume.
func hasCSIVolumeSnapshot(ctx *restoreContext, unstructuredPV *unstructured.Unstructured) bool {
	pv := new(v1.PersistentVolume)
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredPV.Object, pv); err != nil {
		ctx.log.WithError(err).Warnf("Unable to convert PV from unstructured to structured")
		return false
	}

	// An unbound PV has no ClaimRef, so it cannot be matched to a
	// snapshot's source PVC. Guard before dereferencing.
	if pv.Spec.ClaimRef == nil {
		return false
	}

	for _, vs := range ctx.csiVolumeSnapshots {
		// Source.PersistentVolumeClaimName is optional in the CSI API
		// (a VolumeSnapshot may instead reference a pre-existing
		// VolumeSnapshotContent), so skip entries where it is nil.
		if vs.Spec.Source.PersistentVolumeClaimName == nil {
			continue
		}
		if pv.Spec.ClaimRef.Name == *vs.Spec.Source.PersistentVolumeClaimName &&
			pv.Spec.ClaimRef.Namespace == vs.Namespace {
			return true
		}
	}
	return false
}

// hasSnapshotDataUpload reports whether exactly one DataUpload result
// ConfigMap exists for the PVC bound to the given PV, scoped to the current
// restore. A match means the PV should be dynamically re-provisioned from the
// snapshot data rather than restored as-is.
//
// The lookup lists ConfigMaps labeled with this restore's UID, the PVC's
// "<namespace>.<name>" key, and the DataUpload-result usage label. Any
// conversion failure, list failure, missing ClaimRef, or a result count other
// than exactly one yields false (with a warning logged for the error cases).
func hasSnapshotDataUpload(ctx *restoreContext, unstructuredPV *unstructured.Unstructured) bool {
	pv := new(v1.PersistentVolume)
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredPV.Object, pv); err != nil {
		ctx.log.WithError(err).Warnf("Unable to convert PV from unstructured to structured")
		return false
	}

	// An unbound PV has no ClaimRef and therefore no PVC to match
	// a DataUpload result against.
	if pv.Spec.ClaimRef == nil {
		return false
	}

	// DataUpload results are persisted as labeled ConfigMaps; select the one
	// for this restore UID and this PV's claimed PVC. GetValidName sanitizes
	// each value into a legal label value.
	dataUploadResultList := new(v1.ConfigMapList)
	err := ctx.kbClient.List(go_context.TODO(), dataUploadResultList, &crclient.ListOptions{
		LabelSelector: labels.SelectorFromSet(map[string]string{
			velerov1api.RestoreUIDLabel:       label.GetValidName(string(ctx.restore.GetUID())),
			velerov1api.PVCNamespaceNameLabel: label.GetValidName(pv.Spec.ClaimRef.Namespace + "." + pv.Spec.ClaimRef.Name),
			velerov1api.ResourceUsageLabel:    label.GetValidName(string(velerov1api.VeleroResourceUsageDataUploadResult)),
		}),
	})
	if err != nil {
		ctx.log.WithError(err).Warnf("Fail to list DataUpload result CM.")
		return false
	}

	// Anything other than exactly one result is treated as "no snapshot
	// data upload" — zero means none exists, more than one is ambiguous.
	if len(dataUploadResultList.Items) != 1 {
		ctx.log.WithError(fmt.Errorf("dataupload result number is not expected")).
			Warnf("Got %d DataUpload result. Expect one.", len(dataUploadResultList.Items))
		return false
	}

	return true
}

func hasPodVolumeBackup(unstructuredPV *unstructured.Unstructured, ctx *restoreContext) bool {
if len(ctx.podVolumeBackups) == 0 {
return false
Expand Down
Loading

0 comments on commit 86e34ee

Please sign in to comment.