diff --git a/packages/@aws-cdk/aws-s3/README.md b/packages/@aws-cdk/aws-s3/README.md
index dab9af9a9d7f8..b666ee14b44d5 100644
--- a/packages/@aws-cdk/aws-s3/README.md
+++ b/packages/@aws-cdk/aws-s3/README.md
@@ -14,7 +14,7 @@
 Define an unencrypted S3 bucket.
 
 ```ts
-new Bucket(this, 'MyFirstBucket');
+const bucket = new s3.Bucket(this, 'MyFirstBucket');
 ```
 
 `Bucket` constructs expose the following deploy-time attributes:
@@ -43,8 +43,8 @@ new Bucket(this, 'MyFirstBucket');
 Define a KMS-encrypted bucket:
 
 ```ts
-const bucket = new Bucket(this, 'MyEncryptedBucket', {
-  encryption: BucketEncryption.KMS
+const bucket = new s3.Bucket(this, 'MyEncryptedBucket', {
+  encryption: s3.BucketEncryption.KMS,
 });
 
 // you can access the encryption key:
@@ -56,9 +56,9 @@ You can also supply your own key:
 
 ```ts
 const myKmsKey = new kms.Key(this, 'MyKey');
-const bucket = new Bucket(this, 'MyEncryptedBucket', {
-  encryption: BucketEncryption.KMS,
-  encryptionKey: myKmsKey
+const bucket = new s3.Bucket(this, 'MyEncryptedBucket', {
+  encryption: s3.BucketEncryption.KMS,
+  encryptionKey: myKmsKey,
 });
 
 assert(bucket.encryptionKey === myKmsKey);
@@ -67,19 +67,17 @@ assert(bucket.encryptionKey === myKmsKey);
 Enable KMS-SSE encryption via [S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html):
 
 ```ts
-const bucket = new Bucket(this, 'MyEncryptedBucket', {
-  encryption: BucketEncryption.KMS,
-  bucketKeyEnabled: true
+const bucket = new s3.Bucket(this, 'MyEncryptedBucket', {
+  encryption: s3.BucketEncryption.KMS,
+  bucketKeyEnabled: true,
 });
-
-assert(bucket.bucketKeyEnabled === true);
 ```
 
 Use `BucketEncryption.ManagedKms` to use the S3 master KMS key:
 
 ```ts
-const bucket = new Bucket(this, 'Buck', {
-  encryption: BucketEncryption.KMS_MANAGED
+const bucket = new s3.Bucket(this, 'Buck', {
+  encryption: s3.BucketEncryption.KMS_MANAGED,
 });
 
 assert(bucket.encryptionKey == null);
@@ -91,7 +89,7 @@ A bucket policy will be automatically created for the bucket upon the first call
 `addToResourcePolicy(statement)`:
 
 ```ts
-const bucket = new Bucket(this, 'MyBucket');
+const bucket = new s3.Bucket(this, 'MyBucket');
 const result = bucket.addToResourcePolicy(new iam.PolicyStatement({
   actions: ['s3:GetObject'],
   resources: [bucket.arnForObjects('file.txt')],
@@ -103,11 +101,13 @@ If you try to add a policy statement to an existing bucket, this method will
 not do anything:
 
 ```ts
-const bucket = Bucket.fromBucketName(this, 'existingBucket', 'bucket-name');
+const bucket = s3.Bucket.fromBucketName(this, 'existingBucket', 'bucket-name');
 
-// Nothing will change here
+// No policy statement will be added to the resource
 const result = bucket.addToResourcePolicy(new iam.PolicyStatement({
-  ...
+  actions: ['s3:GetObject'],
+  resources: [bucket.arnForObjects('file.txt')],
+  principals: [new iam.AccountRootPrincipal()],
 }));
 ```
 
@@ -116,7 +116,13 @@ already has a policy attached, let alone to re-use that policy to add more
 statements to it. We recommend that you always check the result of the call:
 
 ```ts
-const result = bucket.addToResourcePolicy(...)
+const bucket = new s3.Bucket(this, 'MyBucket');
+const result = bucket.addToResourcePolicy(new iam.PolicyStatement({
+  actions: ['s3:GetObject'],
+  resources: [bucket.arnForObjects('file.txt')],
+  principals: [new iam.AccountRootPrincipal()],
+}));
+
 if (!result.statementAdded) {
   // Uh-oh! Someone probably made a mistake here.
 }
@@ -126,7 +132,8 @@ The bucket policy can be directly accessed after creation to add statements or
 adjust the removal policy.
 
 ```ts
-bucket.policy?.applyRemovalPolicy(RemovalPolicy.RETAIN);
+const bucket = new s3.Bucket(this, 'MyBucket');
+bucket.policy?.applyRemovalPolicy(cdk.RemovalPolicy.RETAIN);
 ```
 
 Most of the time, you won't have to manipulate the bucket policy directly.
@@ -134,10 +141,10 @@ Instead, buckets have "grant" methods called to give prepackaged sets of permiss
 to other resources. For example:
 
 ```ts
-const lambda = new lambda.Function(this, 'Lambda', { /* ... */ });
+declare const myLambda: lambda.Function;
 
-const bucket = new Bucket(this, 'MyBucket');
-bucket.grantReadWrite(lambda);
+const bucket = new s3.Bucket(this, 'MyBucket');
+bucket.grantReadWrite(myLambda);
 ```
 
 Will give the Lambda's execution role permissions to read and write
@@ -150,8 +157,8 @@ from the bucket.
 To require all requests use Secure Socket Layer (SSL):
 
 ```ts
-const bucket = new Bucket(this, 'Bucket', {
-  enforceSSL: true
+const bucket = new s3.Bucket(this, 'Bucket', {
+  enforceSSL: true,
 });
 ```
 
@@ -168,12 +175,13 @@ factory method. This method accepts `BucketAttributes` which describes the prope
 existing bucket:
 
 ```ts
-const bucket = Bucket.fromBucketAttributes(this, 'ImportedBucket', {
-  bucketArn: 'arn:aws:s3:::my-bucket'
+declare const myLambda: lambda.Function;
+const bucket = s3.Bucket.fromBucketAttributes(this, 'ImportedBucket', {
+  bucketArn: 'arn:aws:s3:::my-bucket',
 });
 
 // now you can just call methods on the bucket
-bucket.addEventNotification(EventType.OBJECT_CREATED, ...);
+bucket.addEventNotification(s3.EventType.OBJECT_CREATED, new s3n.LambdaDestination(myLambda), {prefix: 'home/myusername/*'});
 ```
 
 Alternatively, short-hand factories are available as `Bucket.fromBucketName` and
@@ -181,15 +189,15 @@ Alternatively, short-hand factories are available as `Bucket.fromBucketName` and
 name or ARN respectively:
 
 ```ts
-const byName = Bucket.fromBucketName(this, 'BucketByName', 'my-bucket');
-const byArn = Bucket.fromBucketArn(this, 'BucketByArn', 'arn:aws:s3:::my-bucket');
+const byName = s3.Bucket.fromBucketName(this, 'BucketByName', 'my-bucket');
+const byArn = s3.Bucket.fromBucketArn(this, 'BucketByArn', 'arn:aws:s3:::my-bucket');
 ```
 
 The bucket's region defaults to the current stack's region, but can also be explicitly
 set in cases where one of the bucket's regional properties needs to contain the correct values.
 
 ```ts
-const myCrossRegionBucket = Bucket.fromBucketAttributes(this, 'CrossRegionImport', {
+const myCrossRegionBucket = s3.Bucket.fromBucketAttributes(this, 'CrossRegionImport', {
   bucketArn: 'arn:aws:s3:::my-bucket',
   region: 'us-east-1',
 });
@@ -209,8 +217,7 @@ these common use cases. The following example will subscribe an SNS topic to be
 notified of all `s3:ObjectCreated:*` events:
 
 ```ts
-import * as s3n from '@aws-cdk/aws-s3-notifications';
-
+const bucket = new s3.Bucket(this, 'MyBucket');
 const topic = new sns.Topic(this, 'MyTopic');
 bucket.addEventNotification(s3.EventType.OBJECT_CREATED, new s3n.SnsDestination(topic));
 ```
@@ -225,6 +232,8 @@ following example will notify `myQueue` when objects prefixed with `foo/` and
 have the `.jpg` suffix are removed from the bucket.
 
 ```ts
+declare const myQueue: sqs.Queue;
+const bucket = new s3.Bucket(this, 'MyBucket');
 bucket.addEventNotification(s3.EventType.OBJECT_REMOVED,
   new s3n.SqsDestination(myQueue),
   { prefix: 'foo/', suffix: '.jpg' });
@@ -233,8 +242,9 @@ Adding notifications on existing buckets:
 
 ```ts
-const bucket = Bucket.fromBucketAttributes(this, 'ImportedBucket', {
-  bucketArn: 'arn:aws:s3:::my-bucket'
+declare const topic: sns.Topic;
+const bucket = s3.Bucket.fromBucketAttributes(this, 'ImportedBucket', {
+  bucketArn: 'arn:aws:s3:::my-bucket',
 });
 bucket.addEventNotification(s3.EventType.OBJECT_CREATED, new s3n.SnsDestination(topic));
 ```
@@ -249,24 +259,24 @@ Use `blockPublicAccess` to specify [block public access settings] on the bucket.
 Enable all block public access settings:
 
 ```ts
-const bucket = new Bucket(this, 'MyBlockedBucket', {
-  blockPublicAccess: BlockPublicAccess.BLOCK_ALL
+const bucket = new s3.Bucket(this, 'MyBlockedBucket', {
+  blockPublicAccess: s3.BlockPublicAccess.BLOCK_ALL,
 });
 ```
 
 Block and ignore public ACLs:
 
 ```ts
-const bucket = new Bucket(this, 'MyBlockedBucket', {
-  blockPublicAccess: BlockPublicAccess.BLOCK_ACLS
+const bucket = new s3.Bucket(this, 'MyBlockedBucket', {
+  blockPublicAccess: s3.BlockPublicAccess.BLOCK_ACLS,
 });
 ```
 
 Alternatively, specify the settings manually:
 
 ```ts
-const bucket = new Bucket(this, 'MyBlockedBucket', {
-  blockPublicAccess: new BlockPublicAccess({ blockPublicPolicy: true })
+const bucket = new s3.Bucket(this, 'MyBlockedBucket', {
+  blockPublicAccess: new s3.BlockPublicAccess({ blockPublicPolicy: true }),
 });
 ```
 
@@ -279,9 +289,9 @@ When `blockPublicPolicy` is set to `true`, `grantPublicRead()` throws an error.
 Use `serverAccessLogsBucket` to describe where server access logs are to be stored.
 
 ```ts
-const accessLogsBucket = new Bucket(this, 'AccessLogsBucket');
+const accessLogsBucket = new s3.Bucket(this, 'AccessLogsBucket');
 
-const bucket = new Bucket(this, 'MyBucket', {
+const bucket = new s3.Bucket(this, 'MyBucket', {
   serverAccessLogsBucket: accessLogsBucket,
 });
 ```
@@ -289,9 +299,11 @@ const bucket = new Bucket(this, 'MyBucket', {
 It's also possible to specify a prefix for Amazon S3 to assign to all log object keys.
 
 ```ts
-const bucket = new Bucket(this, 'MyBucket', {
+const accessLogsBucket = new s3.Bucket(this, 'AccessLogsBucket');
+
+const bucket = new s3.Bucket(this, 'MyBucket', {
   serverAccessLogsBucket: accessLogsBucket,
-  serverAccessLogsPrefix: 'logs'
+  serverAccessLogsPrefix: 'logs',
 });
 ```
 
@@ -322,8 +334,8 @@ const dataBucket = new s3.Bucket(this, 'DataBucket', {
         bucket: inventoryBucket,
         prefix: 'with-all-versions',
       },
-    }
-  ]
+    },
+  ],
 });
 ```
 
@@ -356,8 +368,8 @@ You can use the two following properties to specify the bucket [redirection poli
 You can statically redirect a to a given Bucket URL or any other host name with `websiteRedirect`:
 
 ```ts
-const bucket = new Bucket(this, 'MyRedirectedBucket', {
-  websiteRedirect: { hostName: 'www.example.com' }
+const bucket = new s3.Bucket(this, 'MyRedirectedBucket', {
+  websiteRedirect: { hostName: 'www.example.com' },
 });
 ```
 
@@ -366,17 +378,17 @@ const bucket = new Bucket(this, 'MyRedirectedBucket', {
 Alternatively, you can also define multiple `websiteRoutingRules`, to define complex, conditional redirections:
 
 ```ts
-const bucket = new Bucket(this, 'MyRedirectedBucket', {
+const bucket = new s3.Bucket(this, 'MyRedirectedBucket', {
   websiteRoutingRules: [{
     hostName: 'www.example.com',
     httpRedirectCode: '302',
-    protocol: RedirectProtocol.HTTPS,
-    replaceKey: ReplaceKey.prefixWith('test/'),
+    protocol: s3.RedirectProtocol.HTTPS,
+    replaceKey: s3.ReplaceKey.prefixWith('test/'),
     condition: {
       httpErrorCodeReturnedEquals: '200',
       keyPrefixEquals: 'prefix',
-    }
-  }]
+    },
+  }],
 });
 ```
 
@@ -397,6 +409,7 @@ We recommend to use Virtual Hosted-Style URL for newly made bucket.
 You can generate both of them.
 
 ```ts
+const bucket = new s3.Bucket(this, 'MyBucket');
 bucket.urlForObject('objectname'); // Path-Style URL
 bucket.virtualHostedUrlForObject('objectname'); // Virtual Hosted-Style URL
 bucket.virtualHostedUrlForObject('objectname', { regional: false }); // Virtual Hosted-Style URL but non-regional
@@ -440,8 +453,8 @@ To override this and force all objects to get deleted during bucket deletion,
 enable the`autoDeleteObjects` option.
 
 ```ts
-const bucket = new Bucket(this, 'MyTempFileBucket', {
-  removalPolicy: RemovalPolicy.DESTROY,
+const bucket = new s3.Bucket(this, 'MyTempFileBucket', {
+  removalPolicy: cdk.RemovalPolicy.DESTROY,
   autoDeleteObjects: true,
 });
 ```
diff --git a/packages/@aws-cdk/aws-s3/lib/bucket.ts b/packages/@aws-cdk/aws-s3/lib/bucket.ts
index d9065d0719c72..67037f874bc3e 100644
--- a/packages/@aws-cdk/aws-s3/lib/bucket.ts
+++ b/packages/@aws-cdk/aws-s3/lib/bucket.ts
@@ -309,7 +309,9 @@ export interface IBucket extends IResource {
    *
    * @example
    *
-   * bucket.addEventNotification(EventType.OnObjectCreated, myLambda, 'home/myusername/*')
+   * declare const myLambda: lambda.Function;
+   * const bucket = new s3.Bucket(this, 'MyBucket');
+   * bucket.addEventNotification(s3.EventType.OBJECT_CREATED, new s3n.LambdaDestination(myLambda), {prefix: 'home/myusername/*'})
    *
    * @see
    * https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
@@ -319,7 +321,7 @@ export interface IBucket extends IResource {
   /**
    * Subscribes a destination to receive notifications when an object is
    * created in the bucket. This is identical to calling
-   * `onEvent(EventType.ObjectCreated)`.
+   * `onEvent(s3.EventType.OBJECT_CREATED)`.
    *
    * @param dest The notification destination (see onEvent)
    * @param filters Filters (see onEvent)
@@ -329,7 +331,7 @@ export interface IBucket extends IResource {
   /**
    * Subscribes a destination to receive notifications when an object is
    * removed from the bucket. This is identical to calling
-   * `onEvent(EventType.ObjectRemoved)`.
+   * `onEvent(EventType.OBJECT_REMOVED)`.
    *
    * @param dest The notification destination (see onEvent)
    * @param filters Filters (see onEvent)
@@ -785,7 +787,9 @@ export abstract class BucketBase extends Resource implements IBucket {
    *
    * @example
    *
-   * bucket.addEventNotification(EventType.OnObjectCreated, myLambda, 'home/myusername/*')
+   * declare const myLambda: lambda.Function;
+   * const bucket = new s3.Bucket(this, 'MyBucket');
+   * bucket.addEventNotification(s3.EventType.OBJECT_CREATED, new s3n.LambdaDestination(myLambda), {prefix: 'home/myusername/*'});
    *
    * @see
    * https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
@@ -797,7 +801,7 @@ export abstract class BucketBase extends Resource implements IBucket {
   /**
    * Subscribes a destination to receive notifications when an object is
    * created in the bucket. This is identical to calling
-   * `onEvent(EventType.ObjectCreated)`.
+   * `onEvent(EventType.OBJECT_CREATED)`.
    *
    * @param dest The notification destination (see onEvent)
    * @param filters Filters (see onEvent)
@@ -809,7 +813,7 @@ export abstract class BucketBase extends Resource implements IBucket {
   /**
    * Subscribes a destination to receive notifications when an object is
    * removed from the bucket. This is identical to calling
-   * `onEvent(EventType.ObjectRemoved)`.
+   * `onEvent(EventType.OBJECT_REMOVED)`.
    *
    * @param dest The notification destination (see onEvent)
    * @param filters Filters (see onEvent)
diff --git a/packages/@aws-cdk/aws-s3/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-s3/rosetta/default.ts-fixture
new file mode 100644
index 0000000000000..de01c16af1e9a
--- /dev/null
+++ b/packages/@aws-cdk/aws-s3/rosetta/default.ts-fixture
@@ -0,0 +1,18 @@
+// Fixture with packages imported, but nothing else
+import cdk = require('@aws-cdk/core');
+import s3 = require('@aws-cdk/aws-s3');
+import kms = require('@aws-cdk/aws-kms');
+import iam = require('@aws-cdk/aws-iam');
+import lambda = require('@aws-cdk/aws-lambda');
+import s3n = require('@aws-cdk/aws-s3-notifications');
+import sns = require('@aws-cdk/aws-sns');
+import sqs = require('@aws-cdk/aws-sqs');
+import assert = require('assert');
+
+class Fixture extends cdk.Stack {
+  constructor(scope: cdk.Construct, id: string) {
+    super(scope, id);
+
+    /// here
+  }
+}
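For context, here is a minimal sketch of how one of the updated README snippets reads once it is combined with the new `default.ts-fixture` above (snippets are presumably spliced in at the `/// here` marker). The `MyQueue` construct is illustrative, standing in for the snippet's `declare const myQueue: sqs.Queue;` placeholder; everything else comes from the fixture and the SQS notification example in the README diff.

```ts
import cdk = require('@aws-cdk/core');
import s3 = require('@aws-cdk/aws-s3');
import s3n = require('@aws-cdk/aws-s3-notifications');
import sqs = require('@aws-cdk/aws-sqs');

class Fixture extends cdk.Stack {
  constructor(scope: cdk.Construct, id: string) {
    super(scope, id);

    // Body of the README's "notify myQueue when .jpg objects under foo/ are
    // removed" snippet, with the declared queue replaced by a concrete one
    // so this sketch is self-contained.
    const myQueue = new sqs.Queue(this, 'MyQueue');
    const bucket = new s3.Bucket(this, 'MyBucket');
    bucket.addEventNotification(s3.EventType.OBJECT_REMOVED,
      new s3n.SqsDestination(myQueue),
      { prefix: 'foo/', suffix: '.jpg' });
  }
}
```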