diff --git a/internal/controller/providerconfig/healthcheck/healthcheck_controller.go b/internal/controller/providerconfig/healthcheck/healthcheck_controller.go index b42a7e78..55ca6a15 100644 --- a/internal/controller/providerconfig/healthcheck/healthcheck_controller.go +++ b/internal/controller/providerconfig/healthcheck/healthcheck_controller.go @@ -18,7 +18,6 @@ package healthcheck import ( "context" - "fmt" "strings" "time" @@ -27,6 +26,7 @@ import ( v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/errors" "go.opentelemetry.io/otel" + "golang.org/x/sync/errgroup" kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" @@ -50,7 +50,6 @@ const ( errPutHealthCheckFile = "failed to upload health check file" errGetHealthCheckFile = "failed to get health check file" errCreateHealthCheckBucket = "failed to create health check bucket" - errDoHealthCheck = "failed to perform health check" errHealthCheckCleanup = "failed to perform health check cleanup" errDeleteHealthCheckBucket = "failed to delete health check bucket" errDeleteLCValidationBucket = "failed to delete lifecycle configuration validation bucket" @@ -60,6 +59,7 @@ const ( healthCheckFile = "health-check-file" ) +//nolint:gocyclo,cyclop // Function requires multiple checks. func (c *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { ctx, span := otel.Tracer("").Start(ctx, "healthcheck.Controller.Reconcile") defer span.End() @@ -120,15 +120,31 @@ func (c *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu } }() + s3BackendClient := c.backendStore.GetBackendClient(providerConfig.Name) + if s3BackendClient == nil { + err := errors.New(errBackendNotStored) + traces.SetAndRecordError(span, err) + + return ctrl.Result{}, err + } + + // Check the backend for the existence of the health check bucket. 
+ bucketExists, err := s3internal.BucketExists(ctx, s3BackendClient, bucketName) + if err != nil { + providerConfig.Status.SetConditions(v1alpha1.HealthCheckFail().WithMessage(err.Error())) + traces.SetAndRecordError(span, err) + + return ctrl.Result{}, err + } + // Create a health check bucket on the backend if one does not already exist. - if err := c.bucketExists(ctx, req.Name, bucketName); err != nil { - if err := c.createBucket(ctx, req.Name, bucketName); err != nil { + if !bucketExists { + _, err := s3internal.CreateBucket(ctx, s3BackendClient, &s3.CreateBucketInput{Bucket: aws.String(bucketName)}) + if resource.Ignore(s3internal.IsAlreadyExists, err) != nil { c.log.Info("Failed to create bucket for health check on s3 backend", consts.KeyBucketName, bucketName, consts.KeyBackendName, providerConfig.Name) - msg := fmt.Sprintf("failed to create health check bucket: %v", err.Error()) - providerConfig.Status.SetConditions(v1alpha1.HealthCheckFail().WithMessage(msg)) - err = errors.Wrap(err, errCreateHealthCheckBucket) + providerConfig.Status.SetConditions(v1alpha1.HealthCheckFail().WithMessage(err.Error())) traces.SetAndRecordError(span, err) return ctrl.Result{}, err @@ -140,10 +156,7 @@ func (c *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu if err := c.doHealthCheck(ctx, providerConfig, bucketName); err != nil { c.log.Info("Failed to do health check on s3 backend", consts.KeyBucketName, bucketName, consts.KeyBackendName, providerConfig.Name) - msg := errDoHealthCheck + ": " + err.Error() - providerConfig.Status.SetConditions(v1alpha1.HealthCheckFail().WithMessage(msg)) - - err = errors.Wrap(err, errDoHealthCheck) + providerConfig.Status.SetConditions(v1alpha1.HealthCheckFail().WithMessage(err.Error())) traces.SetAndRecordError(span, err) return ctrl.Result{}, err @@ -177,19 +190,26 @@ func (c *Controller) cleanup(ctx context.Context, req ctrl.Request, bucketName s return nil } - c.log.Info("Deleting health check bucket", 
consts.KeyBucketName, bucketName, consts.KeyBackendName, req.Name) + g := new(errgroup.Group) + g.Go(func() error { + c.log.Info("Deleting health check bucket", consts.KeyBucketName, bucketName, consts.KeyBackendName, req.Name) + if err := s3internal.DeleteBucket(ctx, backendClient, aws.String(bucketName)); err != nil { + return errors.Wrap(err, errDeleteHealthCheckBucket) + } - if err := s3internal.DeleteBucket(ctx, backendClient, aws.String(bucketName)); err != nil { - return errors.Wrap(err, errDeleteHealthCheckBucket) - } + return nil + }) - c.log.Info("Deleting lifecycle configuration validation bucket", consts.KeyBucketName, v1alpha1.LifecycleConfigValidationBucketName, consts.KeyBackendName, req.Name) + g.Go(func() error { + c.log.Info("Deleting lifecycle configuration validation bucket", consts.KeyBucketName, v1alpha1.LifecycleConfigValidationBucketName, consts.KeyBackendName, req.Name) + if err := s3internal.DeleteBucket(ctx, backendClient, aws.String(v1alpha1.LifecycleConfigValidationBucketName)); err != nil { + return errors.Wrap(err, errDeleteLCValidationBucket) + } - if err := s3internal.DeleteBucket(ctx, backendClient, aws.String(v1alpha1.LifecycleConfigValidationBucketName)); err != nil { - return errors.Wrap(err, errDeleteLCValidationBucket) - } + return nil + }) - return nil + return g.Wait() } // doHealthCheck performs a PutObject and GetObject on the health check bucket on the backend. 
@@ -202,21 +222,26 @@ func (c *Controller) doHealthCheck(ctx context.Context, providerConfig *apisv1al return errors.New(errBackendNotStored) } - _, putErr := s3BackendClient.PutObject(ctx, &s3.PutObjectInput{ + if putErr := s3internal.PutObject(ctx, s3BackendClient, &s3.PutObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(healthCheckFile), Body: strings.NewReader(time.Now().Format(time.RFC850)), - }) - if putErr != nil { - return errors.Wrap(putErr, errPutHealthCheckFile) + }); putErr != nil { + putErr = errors.Wrap(putErr, errPutHealthCheckFile) + traces.SetAndRecordError(span, putErr) + + return putErr } - _, getErr := s3BackendClient.GetObject(ctx, &s3.GetObjectInput{ + _, getErr := s3internal.GetObject(ctx, s3BackendClient, &s3.GetObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(healthCheckFile), }) if getErr != nil { - return errors.Wrap(getErr, errGetHealthCheckFile) + getErr = errors.Wrap(getErr, errGetHealthCheckFile) + traces.SetAndRecordError(span, getErr) + + return getErr } // Health check completed successfully, update status. 
@@ -225,30 +250,6 @@ func (c *Controller) doHealthCheck(ctx context.Context, providerConfig *apisv1al return nil } -func (c *Controller) bucketExists(ctx context.Context, s3BackendName, bucketName string) error { - s3BackendClient := c.backendStore.GetBackendClient(s3BackendName) - if s3BackendClient == nil { - return errors.New(errBackendNotStored) - } - - _, err := s3BackendClient.HeadBucket(ctx, &s3.HeadBucketInput{Bucket: aws.String(bucketName)}) - - return err -} - -func (c *Controller) createBucket(ctx context.Context, s3BackendName, bucketName string) error { - s3BackendClient := c.backendStore.GetBackendClient(s3BackendName) - if s3BackendClient == nil { - return errors.New(errBackendNotStored) - } - - _, err := s3BackendClient.CreateBucket(ctx, &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), - }) - - return resource.Ignore(s3internal.IsAlreadyExists, err) -} - // unpauseBuckets lists all buckets that exist on the given backend by using the custom // backend label. Then, using retry.OnError(), it attempts to unpause each of these buckets // by unsetting the Pause label. 
diff --git a/internal/controller/providerconfig/healthcheck/healthcheck_controller_test.go b/internal/controller/providerconfig/healthcheck/healthcheck_controller_test.go index f3dff0f4..992ac0e1 100644 --- a/internal/controller/providerconfig/healthcheck/healthcheck_controller_test.go +++ b/internal/controller/providerconfig/healthcheck/healthcheck_controller_test.go @@ -43,8 +43,14 @@ import ( func TestReconcile(t *testing.T) { t.Parallel() backendName := "test-backend" + lowerPutObjErr := errors.New("some put err") putObjErr := errors.New("failed to put object") + lowerGetObjErr := errors.New("some get err") getObjErr := errors.New("failed to get object") + lowerHeadBucketErr := errors.New("some head bucket err") + headBucketErr := errors.New("failed to perform head bucket") + lowerCreateBucketErr := errors.New("some create bucket err") + createBucketErr := errors.New("failed to create bucket") type fields struct { fakeS3Client func(*backendstorefakes.FakeS3Client) @@ -139,13 +145,134 @@ func TestReconcile(t *testing.T) { }, }, }, + "ProviderConfig goes from healthy to unhealthy due to failed head bucket": { + fields: fields{ + fakeS3Client: func(fake *backendstorefakes.FakeS3Client) { + // fail the health check with a HeadBucket error + fake.HeadBucketReturns( + &s3.HeadBucketOutput{}, + lowerHeadBucketErr, + ) + }, + providerConfig: &apisv1alpha1.ProviderConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: backendName, + }, + Spec: apisv1alpha1.ProviderConfigSpec{ + DisableHealthCheck: false, + }, + Status: apisv1alpha1.ProviderConfigStatus{ + ProviderConfigStatus: xpv1.ProviderConfigStatus{ + ConditionedStatus: xpv1.ConditionedStatus{ + Conditions: []xpv1.Condition{ + v1alpha1.HealthCheckSuccess(), + }, + }, + }, + }, + }, + }, + args: args{ + req: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: backendName, + }, + }, + }, + want: want{ + res: ctrl.Result{}, + err: lowerHeadBucketErr, + pc: &apisv1alpha1.ProviderConfig{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: backendName, + }, + Spec: apisv1alpha1.ProviderConfigSpec{ + DisableHealthCheck: false, + }, + Status: apisv1alpha1.ProviderConfigStatus{ + ProviderConfigStatus: xpv1.ProviderConfigStatus{ + ConditionedStatus: xpv1.ConditionedStatus{ + Conditions: []xpv1.Condition{ + v1alpha1.HealthCheckFail(). + WithMessage(errors.Wrap(lowerHeadBucketErr, headBucketErr.Error()).Error()), + }, + }, + }, + }, + }, + }, + }, + "ProviderConfig goes from healthy to unhealthy due to failed create bucket": { + fields: fields{ + fakeS3Client: func(fake *backendstorefakes.FakeS3Client) { + // HeadBucket returns not found so the bucket + // does not exist and must be created. + var notFoundError *s3types.NotFound + fake.HeadBucketReturns( + &s3.HeadBucketOutput{}, + notFoundError, + ) + // fail the health check with a CreateBucket error + fake.CreateBucketReturns( + &s3.CreateBucketOutput{}, + lowerCreateBucketErr, + ) + }, + providerConfig: &apisv1alpha1.ProviderConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: backendName, + }, + Spec: apisv1alpha1.ProviderConfigSpec{ + DisableHealthCheck: false, + }, + Status: apisv1alpha1.ProviderConfigStatus{ + ProviderConfigStatus: xpv1.ProviderConfigStatus{ + ConditionedStatus: xpv1.ConditionedStatus{ + Conditions: []xpv1.Condition{ + v1alpha1.HealthCheckSuccess(), + }, + }, + }, + }, + }, + }, + args: args{ + req: ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: backendName, + }, + }, + }, + want: want{ + res: ctrl.Result{}, + err: lowerCreateBucketErr, + pc: &apisv1alpha1.ProviderConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: backendName, + }, + Spec: apisv1alpha1.ProviderConfigSpec{ + DisableHealthCheck: false, + }, + Status: apisv1alpha1.ProviderConfigStatus{ + ProviderConfigStatus: xpv1.ProviderConfigStatus{ + ConditionedStatus: xpv1.ConditionedStatus{ + Conditions: []xpv1.Condition{ + v1alpha1.HealthCheckFail(). 
+ WithMessage(errors.Wrap(errors.Wrap(lowerCreateBucketErr, createBucketErr.Error()), errCreateHealthCheckBucket).Error()), + }, + }, + }, + }, + }, + }, + }, "ProviderConfig goes from healthy to unhealthy due to failed put object": { fields: fields{ fakeS3Client: func(fake *backendstorefakes.FakeS3Client) { // fail the health check with a PutObject error fake.PutObjectReturns( &s3.PutObjectOutput{}, - putObjErr, + lowerPutObjErr, ) }, providerConfig: &apisv1alpha1.ProviderConfig{ @@ -175,7 +302,7 @@ func TestReconcile(t *testing.T) { }, want: want{ res: ctrl.Result{}, - err: putObjErr, + err: lowerPutObjErr, pc: &apisv1alpha1.ProviderConfig{ ObjectMeta: metav1.ObjectMeta{ Name: backendName, @@ -188,7 +315,7 @@ func TestReconcile(t *testing.T) { ConditionedStatus: xpv1.ConditionedStatus{ Conditions: []xpv1.Condition{ v1alpha1.HealthCheckFail(). - WithMessage(errDoHealthCheck + ": " + errors.Wrap(putObjErr, errPutHealthCheckFile).Error()), + WithMessage(errors.Wrap(errors.Wrap(lowerPutObjErr, putObjErr.Error()), errPutHealthCheckFile).Error()), }, }, }, @@ -202,7 +329,7 @@ func TestReconcile(t *testing.T) { // fail the health check with a GetObject error fake.GetObjectReturns( &s3.GetObjectOutput{}, - getObjErr, + lowerGetObjErr, ) }, providerConfig: &apisv1alpha1.ProviderConfig{ @@ -232,7 +359,7 @@ func TestReconcile(t *testing.T) { }, want: want{ res: ctrl.Result{}, - err: getObjErr, + err: lowerGetObjErr, pc: &apisv1alpha1.ProviderConfig{ ObjectMeta: metav1.ObjectMeta{ Name: backendName, @@ -245,7 +372,7 @@ func TestReconcile(t *testing.T) { ConditionedStatus: xpv1.ConditionedStatus{ Conditions: []xpv1.Condition{ v1alpha1.HealthCheckFail(). 
- WithMessage(errDoHealthCheck + ": " + errors.Wrap(getObjErr, errGetHealthCheckFile).Error()), + WithMessage(errors.Wrap(errors.Wrap(lowerGetObjErr, getObjErr.Error()), errGetHealthCheckFile).Error()), }, }, }, diff --git a/internal/s3/bucket.go b/internal/s3/bucket.go index cca71b09..1e1bd7bb 100644 --- a/internal/s3/bucket.go +++ b/internal/s3/bucket.go @@ -4,11 +4,9 @@ import ( "context" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" + awss3 "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/linode/provider-ceph/apis/provider-ceph/v1alpha1" "github.com/linode/provider-ceph/internal/backendstore" "github.com/linode/provider-ceph/internal/otel/traces" "github.com/linode/provider-ceph/internal/s3/cache" @@ -17,9 +15,6 @@ import ( ) const ( - errListObjects = "failed to list objects" - errDeleteObject = "failed to delete object" - errGetBucket = "failed to get bucket" errListBuckets = "failed to list buckets" errCreateBucket = "failed to create bucket" @@ -30,51 +25,42 @@ const ( RequestRetries = 5 ) -func BucketToCreateBucketInput(bucket *v1alpha1.Bucket) *s3.CreateBucketInput { - createBucketInput := &s3.CreateBucketInput{ - ACL: s3types.BucketCannedACL(aws.ToString(bucket.Spec.ForProvider.ACL)), - Bucket: aws.String(bucket.Name), - GrantFullControl: bucket.Spec.ForProvider.GrantFullControl, - GrantRead: bucket.Spec.ForProvider.GrantRead, - GrantReadACP: bucket.Spec.ForProvider.GrantReadACP, - GrantWrite: bucket.Spec.ForProvider.GrantWrite, - GrantWriteACP: bucket.Spec.ForProvider.GrantWriteACP, - ObjectLockEnabledForBucket: aws.ToBool(bucket.Spec.ForProvider.ObjectLockEnabledForBucket), - ObjectOwnership: s3types.ObjectOwnership(aws.ToString(bucket.Spec.ForProvider.ObjectOwnership)), - } +func CreateBucket(ctx context.Context, s3Backend backendstore.S3Client, 
bucket *awss3.CreateBucketInput) (*awss3.CreateBucketOutput, error) { + ctx, span := otel.Tracer("").Start(ctx, "CreateBucket") + defer span.End() - if bucket.Spec.ForProvider.LocationConstraint != "" { - createBucketInput.CreateBucketConfiguration = &s3types.CreateBucketConfiguration{ - LocationConstraint: s3types.BucketLocationConstraint(bucket.Spec.ForProvider.LocationConstraint), - } + resp, err := s3Backend.CreateBucket(ctx, bucket) + if resource.Ignore(IsAlreadyExists, err) != nil { + traces.SetAndRecordError(span, err) + + return resp, errors.Wrap(err, errCreateBucket) } - return createBucketInput -} + cache.Set(*bucket.Bucket) -func BucketToPutBucketACLInput(bucket *v1alpha1.Bucket) *s3.PutBucketAclInput { - return &s3.PutBucketAclInput{ - ACL: s3types.BucketCannedACL(aws.ToString(bucket.Spec.ForProvider.ACL)), - Bucket: aws.String(bucket.Name), - GrantFullControl: bucket.Spec.ForProvider.GrantFullControl, - GrantRead: bucket.Spec.ForProvider.GrantRead, - GrantReadACP: bucket.Spec.ForProvider.GrantReadACP, - GrantWrite: bucket.Spec.ForProvider.GrantWrite, - GrantWriteACP: bucket.Spec.ForProvider.GrantWriteACP, - } + return resp, err } -func BucketToPutBucketOwnershipControlsInput(bucket *v1alpha1.Bucket) *s3.PutBucketOwnershipControlsInput { - return &s3.PutBucketOwnershipControlsInput{ - Bucket: aws.String(bucket.Name), - OwnershipControls: &s3types.OwnershipControls{ - Rules: []s3types.OwnershipControlsRule{ - { - ObjectOwnership: s3types.ObjectOwnership(aws.ToString(bucket.Spec.ForProvider.ObjectOwnership)), - }, - }, - }, +func BucketExists(ctx context.Context, s3Backend backendstore.S3Client, bucketName string) (bool, error) { + ctx, span := otel.Tracer("").Start(ctx, "BucketExists") + defer span.End() + + _, err := s3Backend.HeadBucket(ctx, &awss3.HeadBucketInput{Bucket: aws.String(bucketName)}) + if err != nil { + // An IsNotFound error means the call was successful + // and the bucket does not exist so we return no error. 
+ if resource.Ignore(IsNotFound, err) == nil { + return false, nil + } + traces.SetAndRecordError(span, err) + + return false, errors.Wrap(err, errHeadBucket) } + + cache.Set(bucketName) + + // Bucket exists, return true with no error. + return true, nil } func DeleteBucket(ctx context.Context, s3Backend backendstore.S3Client, bucketName *string) error { @@ -110,7 +96,7 @@ func DeleteBucket(ctx context.Context, s3Backend backendstore.S3Client, bucketNa return errors.Wrap(err, errDeleteBucket) } - _, err = s3Backend.DeleteBucket(ctx, &s3.DeleteBucketInput{Bucket: bucketName}) + _, err = s3Backend.DeleteBucket(ctx, &awss3.DeleteBucketInput{Bucket: bucketName}) if resource.Ignore(IsNotFound, err) != nil { traces.SetAndRecordError(span, err) @@ -124,9 +110,9 @@ func deleteBucketObjects(ctx context.Context, s3Backend backendstore.S3Client, b ctx, span := otel.Tracer("").Start(ctx, "deleteBucketObjects") defer span.End() - objectsInput := &s3.ListObjectsV2Input{Bucket: bucketName} + objectsInput := &awss3.ListObjectsV2Input{Bucket: bucketName} for { - objects, err := s3Backend.ListObjectsV2(ctx, objectsInput) + objects, err := ListObjectsV2(ctx, s3Backend, objectsInput) if err != nil { err = errors.Wrap(err, errListObjects) traces.SetAndRecordError(span, err) @@ -138,7 +124,7 @@ func deleteBucketObjects(ctx context.Context, s3Backend backendstore.S3Client, b for _, object := range objects.Contents { obj := object g.Go(func() error { - return deleteObject(ctx, s3Backend, bucketName, obj.Key, nil) + return DeleteObject(ctx, s3Backend, &awss3.DeleteObjectInput{Bucket: bucketName, Key: obj.Key}) }) } @@ -167,9 +153,9 @@ func deleteBucketObjectVersions(ctx context.Context, s3Backend backendstore.S3Cl ctx, span := otel.Tracer("").Start(ctx, "deleteBucketObjectVersions") defer span.End() - objVersionsInput := &s3.ListObjectVersionsInput{Bucket: bucketName} + objVersionsInput := &awss3.ListObjectVersionsInput{Bucket: bucketName} for { - objectVersions, err := 
s3Backend.ListObjectVersions(ctx, objVersionsInput) + objectVersions, err := ListObjectVersions(ctx, s3Backend, objVersionsInput) if err != nil { err = errors.Wrap(err, errListObjects) traces.SetAndRecordError(span, err) @@ -181,14 +167,14 @@ func deleteBucketObjectVersions(ctx context.Context, s3Backend backendstore.S3Cl for _, deleteMarkerEntry := range objectVersions.DeleteMarkers { delMark := deleteMarkerEntry g.Go(func() error { - return deleteObject(ctx, s3Backend, bucketName, delMark.Key, delMark.VersionId) + return DeleteObject(ctx, s3Backend, &awss3.DeleteObjectInput{Bucket: bucketName, Key: delMark.Key, VersionId: delMark.VersionId}) }) } for _, objectVersion := range objectVersions.Versions { objVer := objectVersion g.Go(func() error { - return deleteObject(ctx, s3Backend, bucketName, objVer.Key, objVer.VersionId) + return DeleteObject(ctx, s3Backend, &awss3.DeleteObjectInput{Bucket: bucketName, Key: objVer.Key, VersionId: objVer.VersionId}) }) } @@ -213,82 +199,3 @@ func deleteBucketObjectVersions(ctx context.Context, s3Backend backendstore.S3Cl return nil } - -func deleteObject(ctx context.Context, s3Backend backendstore.S3Client, bucket, key, versionId *string) error { - ctx, span := otel.Tracer("").Start(ctx, "deleteObject") - defer span.End() - - var err error - for i := 0; i < RequestRetries; i++ { - _, err = s3Backend.DeleteObject(ctx, &s3.DeleteObjectInput{ - Bucket: bucket, - Key: key, - VersionId: versionId, - }) - if resource.Ignore(IsNotFound, err) == nil { - return nil - } - } - traces.SetAndRecordError(span, err) - - return err -} - -func CreateBucket(ctx context.Context, s3Backend backendstore.S3Client, bucket *s3.CreateBucketInput) (*s3.CreateBucketOutput, error) { - ctx, span := otel.Tracer("").Start(ctx, "CreateBucket") - defer span.End() - - resp, err := s3Backend.CreateBucket(ctx, bucket) - if resource.Ignore(IsAlreadyExists, err) != nil { - traces.SetAndRecordError(span, err) - - return resp, errors.Wrap(err, errCreateBucket) - } - - 
cache.Set(*bucket.Bucket) - - return resp, err -} - -func BucketExists(ctx context.Context, s3Backend backendstore.S3Client, bucketName string) (bool, error) { - ctx, span := otel.Tracer("").Start(ctx, "BucketExists") - defer span.End() - - _, err := s3Backend.HeadBucket(ctx, &s3.HeadBucketInput{Bucket: aws.String(bucketName)}) - if err != nil { - // An IsNotFound error means the call was successful - // and the bucket does not exist so we return no error. - if resource.Ignore(IsNotFound, err) == nil { - return false, nil - } - traces.SetAndRecordError(span, err) - - return false, errors.Wrap(err, errHeadBucket) - } - - cache.Set(bucketName) - - // Bucket exists, return true with no error. - return true, nil -} - -// IsAlreadyExists helper function to test for ErrCodeBucketAlreadyOwnedByYou error -func IsAlreadyExists(err error) bool { - var alreadyOwnedByYou *s3types.BucketAlreadyOwnedByYou - - return errors.As(err, &alreadyOwnedByYou) -} - -// IsNotFound helper function to test for NotFound error -func IsNotFound(err error) bool { - var notFoundError *s3types.NotFound - - return errors.As(err, ¬FoundError) -} - -// NoSuchBucket helper function to test for NoSuchBucket error -func NoSuchBucket(err error) bool { - var noSuchBucketError *s3types.NoSuchBucket - - return errors.As(err, &noSuchBucketError) -} diff --git a/internal/s3/bucket_helpers.go b/internal/s3/bucket_helpers.go new file mode 100644 index 00000000..23c04e9d --- /dev/null +++ b/internal/s3/bucket_helpers.go @@ -0,0 +1,77 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/linode/provider-ceph/apis/provider-ceph/v1alpha1" +) + +func BucketToCreateBucketInput(bucket *v1alpha1.Bucket) *s3.CreateBucketInput { + createBucketInput := &s3.CreateBucketInput{ + ACL: 
s3types.BucketCannedACL(aws.ToString(bucket.Spec.ForProvider.ACL)), + Bucket: aws.String(bucket.Name), + GrantFullControl: bucket.Spec.ForProvider.GrantFullControl, + GrantRead: bucket.Spec.ForProvider.GrantRead, + GrantReadACP: bucket.Spec.ForProvider.GrantReadACP, + GrantWrite: bucket.Spec.ForProvider.GrantWrite, + GrantWriteACP: bucket.Spec.ForProvider.GrantWriteACP, + ObjectLockEnabledForBucket: aws.ToBool(bucket.Spec.ForProvider.ObjectLockEnabledForBucket), + ObjectOwnership: s3types.ObjectOwnership(aws.ToString(bucket.Spec.ForProvider.ObjectOwnership)), + } + + if bucket.Spec.ForProvider.LocationConstraint != "" { + createBucketInput.CreateBucketConfiguration = &s3types.CreateBucketConfiguration{ + LocationConstraint: s3types.BucketLocationConstraint(bucket.Spec.ForProvider.LocationConstraint), + } + } + + return createBucketInput +} + +func BucketToPutBucketACLInput(bucket *v1alpha1.Bucket) *s3.PutBucketAclInput { + return &s3.PutBucketAclInput{ + ACL: s3types.BucketCannedACL(aws.ToString(bucket.Spec.ForProvider.ACL)), + Bucket: aws.String(bucket.Name), + GrantFullControl: bucket.Spec.ForProvider.GrantFullControl, + GrantRead: bucket.Spec.ForProvider.GrantRead, + GrantReadACP: bucket.Spec.ForProvider.GrantReadACP, + GrantWrite: bucket.Spec.ForProvider.GrantWrite, + GrantWriteACP: bucket.Spec.ForProvider.GrantWriteACP, + } +} + +func BucketToPutBucketOwnershipControlsInput(bucket *v1alpha1.Bucket) *s3.PutBucketOwnershipControlsInput { + return &s3.PutBucketOwnershipControlsInput{ + Bucket: aws.String(bucket.Name), + OwnershipControls: &s3types.OwnershipControls{ + Rules: []s3types.OwnershipControlsRule{ + { + ObjectOwnership: s3types.ObjectOwnership(aws.ToString(bucket.Spec.ForProvider.ObjectOwnership)), + }, + }, + }, + } +} + +// IsAlreadyExists helper function to test for ErrCodeBucketAlreadyOwnedByYou error +func IsAlreadyExists(err error) bool { + var alreadyOwnedByYou *s3types.BucketAlreadyOwnedByYou + + return errors.As(err, &alreadyOwnedByYou) +} + 
+// IsNotFound helper function to test for NotFound error +func IsNotFound(err error) bool { + var notFoundError *s3types.NotFound + + return errors.As(err, ¬FoundError) +} + +// NoSuchBucket helper function to test for NoSuchBucket error +func NoSuchBucket(err error) bool { + var noSuchBucketError *s3types.NoSuchBucket + + return errors.As(err, &noSuchBucketError) +} diff --git a/internal/s3/lifecycleconfig.go b/internal/s3/lifecycleconfig.go index d30b88c1..5728b3b1 100644 --- a/internal/s3/lifecycleconfig.go +++ b/internal/s3/lifecycleconfig.go @@ -2,12 +2,8 @@ package s3 import ( "context" - "sort" - "github.com/aws/aws-sdk-go-v2/aws" awss3 "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/resource" "go.opentelemetry.io/otel" @@ -23,137 +19,6 @@ const ( errDeleteLifecycle = "failed to delete bucket lifecycle" ) -// GenerateLifecycleConfiguration creates the PutBucketLifecycleConfigurationInput for the AWS SDK -func GenerateLifecycleConfigurationInput(name string, config *v1alpha1.BucketLifecycleConfiguration) *awss3.PutBucketLifecycleConfigurationInput { - if config == nil { - return nil - } - - return &awss3.PutBucketLifecycleConfigurationInput{ - Bucket: aws.String(name), - LifecycleConfiguration: &types.BucketLifecycleConfiguration{Rules: GenerateLifecycleRules(config.Rules)}, - } -} - -// GenerateLifecycleRules creates the list of LifecycleRules for the AWS SDK -func GenerateLifecycleRules(in []v1alpha1.LifecycleRule) []types.LifecycleRule { //nolint:gocognit,gocyclo,cyclop // Function requires many checks. - // NOTE(muvaf): prealloc is disabled due to AWS requiring nil instead - // of 0-length for empty slices. - var result []types.LifecycleRule //nolint:prealloc // NOTE(muvaf): prealloc is disabled due to AWS requiring nil instead of 0-length for empty slices. 
- for _, local := range in { - rule := types.LifecycleRule{ - ID: local.ID, - Status: types.ExpirationStatus(local.Status), - } - if local.AbortIncompleteMultipartUpload != nil { - rule.AbortIncompleteMultipartUpload = &types.AbortIncompleteMultipartUpload{ - DaysAfterInitiation: local.AbortIncompleteMultipartUpload.DaysAfterInitiation, - } - } - if local.Expiration != nil { - rule.Expiration = &types.LifecycleExpiration{ - ExpiredObjectDeleteMarker: local.Expiration.ExpiredObjectDeleteMarker, - } - if local.Expiration.Days != nil { - rule.Expiration.Days = *local.Expiration.Days - } - if local.Expiration.Date != nil { - rule.Expiration.Date = &local.Expiration.Date.Time - } - } - if local.NoncurrentVersionExpiration != nil { - if local.NoncurrentVersionExpiration.NoncurrentDays != nil { - rule.NoncurrentVersionExpiration = &types.NoncurrentVersionExpiration{NoncurrentDays: *local.NoncurrentVersionExpiration.NoncurrentDays} - } - } - if local.NoncurrentVersionTransitions != nil { - rule.NoncurrentVersionTransitions = make([]types.NoncurrentVersionTransition, 0) - for _, transition := range local.NoncurrentVersionTransitions { - nonCurrentVersionTransition := types.NoncurrentVersionTransition{} - if transition.NoncurrentDays != nil { - nonCurrentVersionTransition.NoncurrentDays = *transition.NoncurrentDays - } - if transition.NewerNoncurrentVersions != nil { - nonCurrentVersionTransition.NewerNoncurrentVersions = *transition.NewerNoncurrentVersions - } - nonCurrentVersionTransition.StorageClass = types.TransitionStorageClass(transition.StorageClass) - - rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, nonCurrentVersionTransition) - } - } - if local.Transitions != nil { - rule.Transitions = make([]types.Transition, 0) - for _, localTransition := range local.Transitions { - transition := types.Transition{} - if localTransition.Days != nil { - transition.Days = *localTransition.Days - } - if localTransition.Date != nil { - transition.Date = 
&localTransition.Date.Time - } - - transition.StorageClass = types.TransitionStorageClass(localTransition.StorageClass) - rule.Transitions = append(rule.Transitions, transition) - } - } - // This is done because S3 expects an empty filter, and never nil - rule.Filter = &types.LifecycleRuleFilterMemberPrefix{} - //nolint:nestif // Multiple checks required - if local.Filter != nil { - if local.Filter.Prefix != nil { - rule.Filter = &types.LifecycleRuleFilterMemberPrefix{Value: *local.Filter.Prefix} - } - if local.Filter.Tag != nil { - rule.Filter = &types.LifecycleRuleFilterMemberTag{Value: types.Tag{Key: aws.String(local.Filter.Tag.Key), Value: aws.String(local.Filter.Tag.Value)}} - } - if local.Filter.And != nil { - andOperator := types.LifecycleRuleAndOperator{} - if local.Filter.And.Prefix != nil { - andOperator.Prefix = local.Filter.And.Prefix - } - - if local.Filter.And.Tags != nil { - andOperator.Tags = sortS3TagSet(copyTags(local.Filter.And.Tags)) - } - rule.Filter = &types.LifecycleRuleFilterMemberAnd{Value: andOperator} - } - } - result = append(result, rule) - } - - return result -} - -// copyTags converts a list of local v1beta.Tags to S3 Tags -func copyTags(tags []v1alpha1.Tag) []types.Tag { - out := make([]types.Tag, 0) - for _, one := range tags { - out = append(out, types.Tag{Key: aws.String(one.Key), Value: aws.String(one.Value)}) - } - - return out -} - -// sortS3TagSet stable sorts an external s3 tag list by the key and value. 
-func sortS3TagSet(tags []types.Tag) []types.Tag { - outTags := make([]types.Tag, len(tags)) - copy(outTags, tags) - sort.SliceStable(outTags, func(i, j int) bool { - return aws.ToString(outTags[i].Key) < aws.ToString(outTags[j].Key) - }) - - return outTags -} - -func SortFilterTags(rules []types.LifecycleRule) { - for i := range rules { - andOperator, ok := rules[i].Filter.(*types.LifecycleRuleFilterMemberAnd) - if ok { - andOperator.Value.Tags = sortS3TagSet(andOperator.Value.Tags) - } - } -} - func PutBucketLifecycleConfiguration(ctx context.Context, s3Backend backendstore.S3Client, b *v1alpha1.Bucket) (*awss3.PutBucketLifecycleConfigurationOutput, error) { ctx, span := otel.Tracer("").Start(ctx, "PutBucketLifecycleConfiguration") defer span.End() @@ -202,23 +67,3 @@ func GetBucketLifecycleConfiguration(ctx context.Context, s3Backend backendstore return resp, nil } - -// LifecycleNotFoundErrCode is the error code sent by Ceph when the lifecycle config does not exist -var LifecycleNotFoundErrCode = "NoSuchLifecycleConfiguration" - -// LifecycleConfigurationNotFound is parses the error and validates if the lifecycle configuration does not exist -func LifecycleConfigurationNotFound(err error) bool { - var awsErr smithy.APIError - - return errors.As(err, &awsErr) && awsErr.ErrorCode() == LifecycleNotFoundErrCode -} - -// NoSuchBucketErrCode is the error code sent by Ceph when the bucket does not exist -var NoSuchBucketErrCode = "NoSuchBucket" - -// BucketNotFound parses the error and validates if the bucket does not exist -func IsBucketNotFound(err error) bool { - var awsErr smithy.APIError - - return errors.As(err, &awsErr) && awsErr.ErrorCode() == NoSuchBucketErrCode -} diff --git a/internal/s3/lifecycleconfig_helpers.go b/internal/s3/lifecycleconfig_helpers.go new file mode 100644 index 00000000..3a54b558 --- /dev/null +++ b/internal/s3/lifecycleconfig_helpers.go @@ -0,0 +1,162 @@ +package s3 + +import ( + "sort" + + "github.com/aws/aws-sdk-go-v2/aws" + awss3 
"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go"
+	"github.com/crossplane/crossplane-runtime/pkg/errors"
+	"github.com/linode/provider-ceph/apis/provider-ceph/v1alpha1"
+)
+
+// GenerateLifecycleConfigurationInput creates the PutBucketLifecycleConfigurationInput for the AWS SDK
+func GenerateLifecycleConfigurationInput(name string, config *v1alpha1.BucketLifecycleConfiguration) *awss3.PutBucketLifecycleConfigurationInput {
+	if config == nil {
+		return nil
+	}
+
+	return &awss3.PutBucketLifecycleConfigurationInput{
+		Bucket:                 aws.String(name),
+		LifecycleConfiguration: &types.BucketLifecycleConfiguration{Rules: GenerateLifecycleRules(config.Rules)},
+	}
+}
+
+// GenerateLifecycleRules creates the list of LifecycleRules for the AWS SDK
+func GenerateLifecycleRules(in []v1alpha1.LifecycleRule) []types.LifecycleRule { //nolint:gocognit,gocyclo,cyclop // Function requires many checks.
+	// NOTE(muvaf): prealloc is disabled due to AWS requiring nil instead
+	// of 0-length for empty slices.
+	var result []types.LifecycleRule //nolint:prealloc // NOTE(muvaf): prealloc is disabled due to AWS requiring nil instead of 0-length for empty slices.
+ for _, local := range in { + rule := types.LifecycleRule{ + ID: local.ID, + Status: types.ExpirationStatus(local.Status), + } + if local.AbortIncompleteMultipartUpload != nil { + rule.AbortIncompleteMultipartUpload = &types.AbortIncompleteMultipartUpload{ + DaysAfterInitiation: local.AbortIncompleteMultipartUpload.DaysAfterInitiation, + } + } + if local.Expiration != nil { + rule.Expiration = &types.LifecycleExpiration{ + ExpiredObjectDeleteMarker: local.Expiration.ExpiredObjectDeleteMarker, + } + if local.Expiration.Days != nil { + rule.Expiration.Days = *local.Expiration.Days + } + if local.Expiration.Date != nil { + rule.Expiration.Date = &local.Expiration.Date.Time + } + } + if local.NoncurrentVersionExpiration != nil && local.NoncurrentVersionExpiration.NoncurrentDays != nil { + rule.NoncurrentVersionExpiration = &types.NoncurrentVersionExpiration{NoncurrentDays: *local.NoncurrentVersionExpiration.NoncurrentDays} + } + + if local.NoncurrentVersionTransitions != nil { + rule.NoncurrentVersionTransitions = make([]types.NoncurrentVersionTransition, 0) + for _, transition := range local.NoncurrentVersionTransitions { + nonCurrentVersionTransition := types.NoncurrentVersionTransition{} + if transition.NoncurrentDays != nil { + nonCurrentVersionTransition.NoncurrentDays = *transition.NoncurrentDays + } + if transition.NewerNoncurrentVersions != nil { + nonCurrentVersionTransition.NewerNoncurrentVersions = *transition.NewerNoncurrentVersions + } + nonCurrentVersionTransition.StorageClass = types.TransitionStorageClass(transition.StorageClass) + + rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, nonCurrentVersionTransition) + } + } + if local.Transitions != nil { + rule.Transitions = make([]types.Transition, 0) + for _, localTransition := range local.Transitions { + transition := types.Transition{} + if localTransition.Days != nil { + transition.Days = *localTransition.Days + } + if localTransition.Date != nil { + transition.Date = 
&localTransition.Date.Time + } + + transition.StorageClass = types.TransitionStorageClass(localTransition.StorageClass) + rule.Transitions = append(rule.Transitions, transition) + } + } + // This is done because S3 expects an empty filter, and never nil + rule.Filter = &types.LifecycleRuleFilterMemberPrefix{} + //nolint:nestif // Multiple checks required + if local.Filter != nil { + if local.Filter.Prefix != nil { + rule.Filter = &types.LifecycleRuleFilterMemberPrefix{Value: *local.Filter.Prefix} + } + if local.Filter.Tag != nil { + rule.Filter = &types.LifecycleRuleFilterMemberTag{Value: types.Tag{Key: aws.String(local.Filter.Tag.Key), Value: aws.String(local.Filter.Tag.Value)}} + } + if local.Filter.And != nil { + andOperator := types.LifecycleRuleAndOperator{} + if local.Filter.And.Prefix != nil { + andOperator.Prefix = local.Filter.And.Prefix + } + + if local.Filter.And.Tags != nil { + andOperator.Tags = sortS3TagSet(copyTags(local.Filter.And.Tags)) + } + rule.Filter = &types.LifecycleRuleFilterMemberAnd{Value: andOperator} + } + } + result = append(result, rule) + } + + return result +} + +// copyTags converts a list of local v1beta.Tags to S3 Tags +func copyTags(tags []v1alpha1.Tag) []types.Tag { + out := make([]types.Tag, 0) + for _, one := range tags { + out = append(out, types.Tag{Key: aws.String(one.Key), Value: aws.String(one.Value)}) + } + + return out +} + +// sortS3TagSet stable sorts an external s3 tag list by the key and value. 
+func sortS3TagSet(tags []types.Tag) []types.Tag {
+	outTags := make([]types.Tag, len(tags))
+	copy(outTags, tags)
+	sort.SliceStable(outTags, func(i, j int) bool {
+		return aws.ToString(outTags[i].Key) < aws.ToString(outTags[j].Key)
+	})
+
+	return outTags
+}
+
+func SortFilterTags(rules []types.LifecycleRule) {
+	for i := range rules {
+		andOperator, ok := rules[i].Filter.(*types.LifecycleRuleFilterMemberAnd)
+		if ok {
+			andOperator.Value.Tags = sortS3TagSet(andOperator.Value.Tags)
+		}
+	}
+}
+
+// LifecycleNotFoundErrCode is the error code sent by Ceph when the lifecycle config does not exist
+var LifecycleNotFoundErrCode = "NoSuchLifecycleConfiguration"
+
+// LifecycleConfigurationNotFound parses the error and validates if the lifecycle configuration does not exist
+func LifecycleConfigurationNotFound(err error) bool {
+	var awsErr smithy.APIError
+
+	return errors.As(err, &awsErr) && awsErr.ErrorCode() == LifecycleNotFoundErrCode
+}
+
+// NoSuchBucketErrCode is the error code sent by Ceph when the bucket does not exist
+var NoSuchBucketErrCode = "NoSuchBucket"
+
+// IsBucketNotFound parses the error and validates if the bucket does not exist
+func IsBucketNotFound(err error) bool {
+	var awsErr smithy.APIError
+
+	return errors.As(err, &awsErr) && awsErr.ErrorCode() == NoSuchBucketErrCode
+}
diff --git a/internal/s3/object.go b/internal/s3/object.go
new file mode 100644
index 00000000..3b8e0a85
--- /dev/null
+++ b/internal/s3/object.go
@@ -0,0 +1,94 @@
+package s3
+
+import (
+	"context"
+
+	awss3 "github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/crossplane/crossplane-runtime/pkg/errors"
+	"github.com/linode/provider-ceph/internal/backendstore"
+	"github.com/linode/provider-ceph/internal/otel/traces"
+	"go.opentelemetry.io/otel"
+)
+
+const (
+	errListObjects        = "failed to list objects"
+	errListObjectVersions = "failed to list object versions"
+	errDeleteObject       = "failed to delete object"
+	errGetObject          = "failed to get object"
+	errPutObject          = "failed 
to put object" +) + +func GetObject(ctx context.Context, s3Backend backendstore.S3Client, input *awss3.GetObjectInput) (*awss3.GetObjectOutput, error) { + ctx, span := otel.Tracer("").Start(ctx, "GetObject") + defer span.End() + + resp, err := s3Backend.GetObject(ctx, input) + if err != nil { + err = errors.Wrap(err, errGetObject) + traces.SetAndRecordError(span, err) + + return resp, err + } + + return resp, nil +} + +func DeleteObject(ctx context.Context, s3Backend backendstore.S3Client, input *awss3.DeleteObjectInput) error { + ctx, span := otel.Tracer("").Start(ctx, "DeleteObject") + defer span.End() + + _, err := s3Backend.DeleteObject(ctx, input) + if err != nil { + err = errors.Wrap(err, errDeleteObject) + traces.SetAndRecordError(span, err) + + return err + } + + return nil +} + +func PutObject(ctx context.Context, s3Backend backendstore.S3Client, input *awss3.PutObjectInput) error { + ctx, span := otel.Tracer("").Start(ctx, "PutObject") + defer span.End() + + _, err := s3Backend.PutObject(ctx, input) + if err != nil { + err = errors.Wrap(err, errPutObject) + traces.SetAndRecordError(span, err) + + return err + } + + return nil +} + +func ListObjectsV2(ctx context.Context, s3Backend backendstore.S3Client, input *awss3.ListObjectsV2Input) (*awss3.ListObjectsV2Output, error) { + ctx, span := otel.Tracer("").Start(ctx, "ListObjectsV2") + defer span.End() + + resp, err := s3Backend.ListObjectsV2(ctx, input) + if err != nil { + err = errors.Wrap(err, errListObjects) + traces.SetAndRecordError(span, err) + + return resp, err + } + + return resp, nil +} + +func ListObjectVersions(ctx context.Context, s3Backend backendstore.S3Client, input *awss3.ListObjectVersionsInput) (*awss3.ListObjectVersionsOutput, error) { + ctx, span := otel.Tracer("").Start(ctx, "ListObjectsVersions") + defer span.End() + + resp, err := s3Backend.ListObjectVersions(ctx, input) + if err != nil { + err = errors.Wrap(err, errListObjectVersions) + traces.SetAndRecordError(span, err) + + return 
resp, err + } + + return resp, nil +}